// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uv.h>

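/*
 * Cursors for the two early bump allocators used below: segment_pos and
 * segment_low bound segment-sized allocations (shadow backing when EDAT
 * is available), pgalloc_pos and pgalloc_low bound page-sized allocations
 * for page tables and 4K shadow pages. Both cursors move downward from
 * the end of detected memory; pgalloc_freeable records the allocator
 * position before the identity-mapping tables are built, so that memory
 * can be handed back later (see kasan_free_early_identity()).
 */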
static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

static void __init kasan_early_panic(const char *reason)
{
    sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
    sclp_early_printk(reason);
    disabled_wait();
}

static void * __init kasan_early_alloc_segment(void)
{
    segment_pos -= _SEGMENT_SIZE;

    if (segment_pos < segment_low)
        kasan_early_panic("out of memory during initialisation\n");

    return (void *)segment_pos;
}

static void * __init kasan_early_alloc_pages(unsigned int order)
{
    pgalloc_pos -= (PAGE_SIZE << order);

    if (pgalloc_pos < pgalloc_low)
        kasan_early_panic("out of memory during initialisation\n");

    return (void *)pgalloc_pos;
}

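/*
 * Allocate a region or segment (CRST) table from the early page allocator
 * and preset all of its entries to the "empty" entry value @val.
 */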
static void * __init kasan_early_crst_alloc(unsigned long val)
{
    unsigned long *table;

    table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
    if (table)
        crst_table_init(table, val);
    return table;
}

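/*
 * s390 page tables are half a page in size, so every allocated page is
 * split in two: the second half is stashed in pte_leftover and handed out
 * on the next call. All entries are preset to _PAGE_INVALID.
 */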
static pte_t * __init kasan_early_pte_alloc(void)
{
    static void *pte_leftover;
    pte_t *pte;

    BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

    if (!pte_leftover) {
        pte_leftover = kasan_early_alloc_pages(0);
        pte = pte_leftover + _PAGE_TABLE_SIZE;
    } else {
        pte = pte_leftover;
        pte_leftover = NULL;
    }
    memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
    return pte;
}

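/*
 * Modes for kasan_early_pgtable_populate():
 *   POPULATE_ONE2ONE     - identity-map the range (page frame == address)
 *   POPULATE_MAP         - back the range with freshly allocated zeroed
 *                          memory (shadow for tracked memory)
 *   POPULATE_ZERO_SHADOW - map the range read-only to the shared kasan
 *                          zero shadow page/tables (untracked memory)
 *   POPULATE_SHALLOW     - only create the top-level tables and leave the
 *                          rest unpopulated (used with KASAN_VMALLOC)
 */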
enum populate_mode {
    POPULATE_ONE2ONE,
    POPULATE_MAP,
    POPULATE_ZERO_SHADOW,
    POPULATE_SHALLOW
};
static void __init kasan_early_pgtable_populate(unsigned long address,
                                                unsigned long end,
                                                enum populate_mode mode)
{
    unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
    pgd_t *pg_dir;
    p4d_t *p4_dir;
    pud_t *pu_dir;
    pmd_t *pm_dir;
    pte_t *pt_dir;

    pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
    if (!has_nx)
        pgt_prot_zero &= ~_PAGE_NOEXEC;
    pgt_prot = pgprot_val(PAGE_KERNEL);
    sgt_prot = pgprot_val(SEGMENT_KERNEL);
    if (!has_nx || mode == POPULATE_ONE2ONE) {
        pgt_prot &= ~_PAGE_NOEXEC;
        sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
    }

    /*
     * The first 1MB of 1:1 mapping is mapped with 4KB pages
     */
    while (address < end) {
        pg_dir = pgd_offset_k(address);
        if (pgd_none(*pg_dir)) {
            if (mode == POPULATE_ZERO_SHADOW &&
                IS_ALIGNED(address, PGDIR_SIZE) &&
                end - address >= PGDIR_SIZE) {
                pgd_populate(&init_mm, pg_dir,
                             kasan_early_shadow_p4d);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                continue;
            }
            p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
            pgd_populate(&init_mm, pg_dir, p4_dir);
        }

        if (mode == POPULATE_SHALLOW) {
            address = (address + P4D_SIZE) & P4D_MASK;
            continue;
        }

        p4_dir = p4d_offset(pg_dir, address);
        if (p4d_none(*p4_dir)) {
            if (mode == POPULATE_ZERO_SHADOW &&
                IS_ALIGNED(address, P4D_SIZE) &&
                end - address >= P4D_SIZE) {
                p4d_populate(&init_mm, p4_dir,
                             kasan_early_shadow_pud);
                address = (address + P4D_SIZE) & P4D_MASK;
                continue;
            }
            pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
            p4d_populate(&init_mm, p4_dir, pu_dir);
        }

        pu_dir = pud_offset(p4_dir, address);
        if (pud_none(*pu_dir)) {
            if (mode == POPULATE_ZERO_SHADOW &&
                IS_ALIGNED(address, PUD_SIZE) &&
                end - address >= PUD_SIZE) {
                pud_populate(&init_mm, pu_dir,
                             kasan_early_shadow_pmd);
                address = (address + PUD_SIZE) & PUD_MASK;
                continue;
            }
            pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
            pud_populate(&init_mm, pu_dir, pm_dir);
        }

        pm_dir = pmd_offset(pu_dir, address);
        if (pmd_none(*pm_dir)) {
            if (IS_ALIGNED(address, PMD_SIZE) &&
                end - address >= PMD_SIZE) {
                if (mode == POPULATE_ZERO_SHADOW) {
                    pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
                    address = (address + PMD_SIZE) & PMD_MASK;
                    continue;
                } else if (has_edat && address) {
                    void *page;

                    if (mode == POPULATE_ONE2ONE) {
                        page = (void *)address;
                    } else {
                        page = kasan_early_alloc_segment();
                        memset(page, 0, _SEGMENT_SIZE);
                    }
                    set_pmd(pm_dir, __pmd(__pa(page) | sgt_prot));
                    address = (address + PMD_SIZE) & PMD_MASK;
                    continue;
                }
            }
            pt_dir = kasan_early_pte_alloc();
            pmd_populate(&init_mm, pm_dir, pt_dir);
        } else if (pmd_large(*pm_dir)) {
            address = (address + PMD_SIZE) & PMD_MASK;
            continue;
        }

        pt_dir = pte_offset_kernel(pm_dir, address);
        if (pte_none(*pt_dir)) {
            void *page;

            switch (mode) {
            case POPULATE_ONE2ONE:
                page = (void *)address;
                set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
                break;
            case POPULATE_MAP:
                page = kasan_early_alloc_pages(0);
                memset(page, 0, PAGE_SIZE);
                set_pte(pt_dir, __pte(__pa(page) | pgt_prot));
                break;
            case POPULATE_ZERO_SHADOW:
                page = kasan_early_shadow_page;
                set_pte(pt_dir, __pte(__pa(page) | pgt_prot_zero));
                break;
            case POPULATE_SHALLOW:
                /* should never happen */
                break;
            }
        }
        address += PAGE_SIZE;
    }
}

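/*
 * Make @pgd the kernel and user address space root and load the resulting
 * ASCE into control registers 1, 7 and 13 (primary, secondary and home
 * address spaces).
 */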
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
    unsigned long asce_bits;

    asce_bits = asce_type | _ASCE_TABLE_LENGTH;
    S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
    S390_lowcore.user_asce = S390_lowcore.kernel_asce;

    __ctl_load(S390_lowcore.kernel_asce, 1, 1);
    __ctl_load(S390_lowcore.kernel_asce, 7, 7);
    __ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

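/*
 * Enable dynamic address translation by setting the DAT bit in the PSW
 * mask and switching to home address space mode.
 */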
static void __init kasan_enable_dat(void)
{
    psw_t psw;

    psw.mask = __extract_psw();
    psw_bits(psw).dat = 1;
    psw_bits(psw).as = PSW_BITS_AS_HOME;
    __load_psw_mask(psw.mask);
}

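/*
 * Probe the facilities used during early setup: facility 8 (EDAT1, which
 * allows segment-sized mappings) and facility 130 (instruction-execution
 * protection, providing the no-execute semantics). Each one is switched
 * on via its control register 0 bit when present.
 */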
static void __init kasan_early_detect_facilities(void)
{
    if (test_facility(8)) {
        has_edat = true;
        __ctl_set_bit(0, 23);
    }
    if (!noexec_disabled && test_facility(130)) {
        has_nx = true;
        __ctl_set_bit(0, 20);
    }
}

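/*
 * Set up the early kasan shadow and the identity mapping in early_pg_dir
 * and switch to them. This runs before regular memory management is
 * available, so all page tables and shadow memory come from the bump
 * allocators above; the resulting layout is sketched in the comment
 * further down in this function.
 */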
void __init kasan_early_init(void)
{
    unsigned long shadow_alloc_size;
    unsigned long initrd_end;
    unsigned long memsize;
    unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
    pte_t pte_z;
    pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
    pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
    p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

    kasan_early_detect_facilities();
    if (!has_nx)
        pgt_prot &= ~_PAGE_NOEXEC;
    pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

    memsize = get_mem_detect_end();
    if (!memsize)
        kasan_early_panic("cannot detect physical memory size\n");
    /*
     * Kasan currently supports standby memory but only if it follows
     * online memory (default allocation), i.e. no memory holes.
     * - memsize represents end of online memory
     * - ident_map_size represents online + standby and memory limits
     *   accounted.
     * Kasan maps "memsize" right away.
     * [0, memsize]                 - as identity mapping
     * [__sha(0), __sha(memsize)]   - shadow memory for identity mapping
     * The rest [memsize, ident_map_size] if memsize < ident_map_size
     * could be mapped/unmapped dynamically later during memory hotplug.
     */
    memsize = min(memsize, ident_map_size);

    BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
    BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
    crst_table_init((unsigned long *)early_pg_dir, _REGION2_ENTRY_EMPTY);

    /* init kasan zero shadow */
    crst_table_init((unsigned long *)kasan_early_shadow_p4d,
                    p4d_val(p4d_z));
    crst_table_init((unsigned long *)kasan_early_shadow_pud,
                    pud_val(pud_z));
    crst_table_init((unsigned long *)kasan_early_shadow_pmd,
                    pmd_val(pmd_z));
    memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

    shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
    pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
    if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
        initrd_end =
            round_up(initrd_data.start + initrd_data.size, _SEGMENT_SIZE);
        pgalloc_low = max(pgalloc_low, initrd_end);
    }

    if (pgalloc_low + shadow_alloc_size > memsize)
        kasan_early_panic("out of memory during initialisation\n");

    if (has_edat) {
        segment_pos = round_down(memsize, _SEGMENT_SIZE);
        segment_low = segment_pos - shadow_alloc_size;
        pgalloc_pos = segment_low;
    } else {
        pgalloc_pos = memsize;
    }
    init_mm.pgd = early_pg_dir;
    /*
     * Current memory layout:
     * +- 0 -------------+     +- shadow start -+
     * | 1:1 ram mapping |    /| 1/8 ram        |
     * |                 |   / |                |
     * +- end of ram ----+  /  +----------------+
     * | ... gap ...     | /   |                |
     * |                 |/    |    kasan       |
     * +- shadow start --+     |    zero        |
     * | 1/8 addr space  |     |    page        |
     * +- shadow end    -+     |    mapping     |
     * | ... gap ...     |\    |  (untracked)   |
     * +- vmalloc area  -+ \   |                |
     * | vmalloc_size    |  \  |                |
     * +- modules vaddr -+   \ +----------------+
     * | 2Gb             |    \|      unmapped  | allocated per module
     * +-----------------+     +- shadow end ---+
     *
     * Current memory layout (KASAN_VMALLOC):
     * +- 0 -------------+     +- shadow start -+
     * | 1:1 ram mapping |    /| 1/8 ram        |
     * |                 |   / |                |
     * +- end of ram ----+  /  +----------------+
     * | ... gap ...     | /   |    kasan       |
     * |                 |/    |    zero        |
     * +- shadow start --+     |    page        |
     * | 1/8 addr space  |     |    mapping     |
     * +- shadow end    -+     |  (untracked)   |
     * | ... gap ...     |\    |                |
     * +- vmalloc area  -+ \   +- vmalloc area -+
     * | vmalloc_size    |  \  |shallow populate|
     * +- modules vaddr -+   \ +- modules area -+
     * | 2Gb             |    \|shallow populate|
     * +-----------------+     +- shadow end ---+
     */
    /* populate kasan shadow (for identity mapping and zero page mapping) */
    kasan_early_pgtable_populate(__sha(0), __sha(memsize), POPULATE_MAP);
    if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
        /* shallowly populate kasan shadow for vmalloc and modules */
        kasan_early_pgtable_populate(__sha(VMALLOC_START), __sha(MODULES_END),
                                     POPULATE_SHALLOW);
    }
    /* populate kasan shadow for untracked memory */
    kasan_early_pgtable_populate(__sha(ident_map_size),
                                 IS_ENABLED(CONFIG_KASAN_VMALLOC) ?
                                       __sha(VMALLOC_START) :
                                       __sha(MODULES_VADDR),
                                 POPULATE_ZERO_SHADOW);
    kasan_early_pgtable_populate(__sha(MODULES_END), __sha(_REGION1_SIZE),
                                 POPULATE_ZERO_SHADOW);
    /* memory allocated for identity mapping structs will be freed later */
    pgalloc_freeable = pgalloc_pos;
    /* populate identity mapping */
    kasan_early_pgtable_populate(0, memsize, POPULATE_ONE2ONE);
    kasan_set_pgd(early_pg_dir, _ASCE_TYPE_REGION2);
    kasan_enable_dat();
    /* enable kasan */
    init_task.kasan_depth = 0;
    memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
    sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow_mapping(void)
{
    /*
     * At this point we are still running on the early page tables set up
     * in early_pg_dir, while swapper_pg_dir has just been initialized with
     * the identity mapping. Carry over the shadow memory region from
     * early_pg_dir to swapper_pg_dir.
     */

    pgd_t *pg_dir_src;
    pgd_t *pg_dir_dst;
    p4d_t *p4_dir_src;
    p4d_t *p4_dir_dst;

    pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
    pg_dir_dst = pgd_offset_raw(init_mm.pgd, KASAN_SHADOW_START);
    p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
    p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
    memcpy(p4_dir_dst, p4_dir_src,
           (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
}

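/*
 * Return the memory that only backed the early identity-mapping page
 * tables to memblock once the final kernel page tables have taken over.
 */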
void __init kasan_free_early_identity(void)
{
    memblock_phys_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}