Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright IBM Corp. 2019
0004  */
0005 #include <linux/pgtable.h>
0006 #include <asm/mem_detect.h>
0007 #include <asm/cpacf.h>
0008 #include <asm/timex.h>
0009 #include <asm/sclp.h>
0010 #include <asm/kasan.h>
0011 #include "decompressor.h"
0012 #include "boot.h"
0013 
/*
 * Random-number facility modes returned by check_prng(), in increasing
 * order of preference (TRNG is tried first, TDES is the fallback).
 */
#define PRNG_MODE_TDES   1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG   3
0017 
/*
 * Parameter block passed to cpacf_prno() for the SHA512-DRNG seed/gen
 * operations. Layout is dictated by the hardware instruction; do not
 * reorder or resize fields.
 */
struct prno_parm {
	u32 res;		/* presumably reserved — TODO confirm against PRNO spec */
	u32 reseed_counter;
	u64 stream_bytes;
	u8  V[112];		/* DRNG internal state (value vector) */
	u8  C[112];		/* DRNG internal state (constant vector) */
};
0025 
/*
 * State for the TDES fallback mode driven via cpacf_kmc(). Only
 * parm_block is touched by get_random() in this file; reseed_counter
 * and byte_counter are unused here.
 */
struct prng_parm {
	u8  parm_block[32];	/* KMC-PRNG chaining value / parameter block */
	u32 reseed_counter;
	u64 byte_counter;
};
0031 
0032 static int check_prng(void)
0033 {
0034     if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
0035         sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
0036         return 0;
0037     }
0038     if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
0039         return PRNG_MODE_TRNG;
0040     if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
0041         return PRNG_MODE_SHA512;
0042     else
0043         return PRNG_MODE_TDES;
0044 }
0045 
/*
 * Generate a pseudo-random number in [0, limit) into *value, using the
 * best facility reported by check_prng().
 *
 * Returns 0 on success, -1 when no usable PRNG mode is available.
 *
 * NOTE(review): "random % limit" carries a slight modulo bias unless
 * limit divides 2^64 — presumably acceptable for base-address
 * randomization; confirm if stronger uniformity is ever required.
 */
static int get_random(unsigned long limit, unsigned long *value)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	/* TOD clock provides cheap, always-available seed material */
	seed = get_tod_clock_fast();
	switch (mode) {
	case PRNG_MODE_TRNG:
		/* hardware true RNG: no seeding required */
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		/* seed the DRNG first, then draw one machine word */
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
		       (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
		       sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
		/* stir the parameter block by feeding output back 16 times */
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
			      (u8 *) entropy, (u8 *) entropy,
			      sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
		      (u8 *) &random, sizeof(random));
		break;
	default:
		/* check_prng() returned 0: no PRNG facility present */
		return -1;
	}
	*value = random % limit;
	return 0;
}
0093 
/*
 * To randomize the kernel base address we have to consider several facts:
 * 1. physical online memory might not be contiguous and may have holes.
 *    mem_detect info contains a list of online memory ranges we should
 *    consider.
 * 2. we have several memory regions which are occupied and we should not
 *    overlap and destroy them. Currently safe_addr tells us the border below
 *    which all those occupied regions are. We are safe to use anything above
 *    safe_addr.
 * 3. the upper limit might apply as well, even if memory above that limit is
 *    online. Currently those limitations are:
 *    3.1. Limit set by "mem=" kernel command line option
 *    3.2. memory reserved at the end for kasan initialization.
 * 4. kernel base address must be aligned to THREAD_SIZE (kernel stack size).
 *    Which is required for CONFIG_CHECK_STACK. Currently THREAD_SIZE is 4 pages
 *    (16 pages when the kernel is built with kasan enabled)
 * Assumptions:
 * 1. kernel size (including .bss size) and upper memory limit are page aligned.
 * 2. mem_detect memory region start is THREAD_SIZE aligned / end is PAGE_SIZE
 *    aligned (in practice the memory configuration granularity on z/VM and
 *    LPAR is 1 MB).
 *
 * To guarantee uniform distribution of kernel base address among all suitable
 * addresses we generate a random value just once. For that we need to build a
 * contiguous range in which every value would be suitable. We can build this
 * range by simply counting all suitable addresses (let's call them positions)
 * which would be valid as kernel base address. To count positions we iterate
 * over online memory ranges. For each range which is big enough for the
 * kernel image we count all suitable addresses we can put the kernel image at,
 * that is
 * (end - start - kernel_size) / THREAD_SIZE + 1
 * The two functions count_valid_kernel_positions and position_to_address help
 * to count positions in a given memory range and then convert a position back
 * to an address.
 */
0128 static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
0129                           unsigned long _min,
0130                           unsigned long _max)
0131 {
0132     unsigned long start, end, pos = 0;
0133     int i;
0134 
0135     for_each_mem_detect_block(i, &start, &end) {
0136         if (_min >= end)
0137             continue;
0138         if (start >= _max)
0139             break;
0140         start = max(_min, start);
0141         end = min(_max, end);
0142         if (end - start < kernel_size)
0143             continue;
0144         pos += (end - start - kernel_size) / THREAD_SIZE + 1;
0145     }
0146 
0147     return pos;
0148 }
0149 
0150 static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
0151                  unsigned long _min, unsigned long _max)
0152 {
0153     unsigned long start, end;
0154     int i;
0155 
0156     for_each_mem_detect_block(i, &start, &end) {
0157         if (_min >= end)
0158             continue;
0159         if (start >= _max)
0160             break;
0161         start = max(_min, start);
0162         end = min(_max, end);
0163         if (end - start < kernel_size)
0164             continue;
0165         if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
0166             return start + (pos - 1) * THREAD_SIZE;
0167         pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
0168     }
0169 
0170     return 0;
0171 }
0172 
0173 unsigned long get_random_base(unsigned long safe_addr)
0174 {
0175     unsigned long memory_limit = get_mem_detect_end();
0176     unsigned long base_pos, max_pos, kernel_size;
0177     unsigned long kasan_needs;
0178     int i;
0179 
0180     memory_limit = min(memory_limit, ident_map_size);
0181 
0182     /*
0183      * Avoid putting kernel in the end of physical memory
0184      * which kasan will use for shadow memory and early pgtable
0185      * mapping allocations.
0186      */
0187     memory_limit -= kasan_estimate_memory_needs(memory_limit);
0188 
0189     if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size) {
0190         if (safe_addr < initrd_data.start + initrd_data.size)
0191             safe_addr = initrd_data.start + initrd_data.size;
0192     }
0193     safe_addr = ALIGN(safe_addr, THREAD_SIZE);
0194 
0195     kernel_size = vmlinux.image_size + vmlinux.bss_size;
0196     if (safe_addr + kernel_size > memory_limit)
0197         return 0;
0198 
0199     max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
0200     if (!max_pos) {
0201         sclp_early_printk("KASLR disabled: not enough memory\n");
0202         return 0;
0203     }
0204 
0205     /* we need a value in the range [1, base_pos] inclusive */
0206     if (get_random(max_pos, &base_pos))
0207         return 0;
0208     return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
0209 }