Back to home page

OSCL-LXR

 
 

    


0001 // SPDX-License-Identifier: GPL-2.0
0002 /*
0003  * Copyright (C) 2020 - Google LLC
0004  * Author: Quentin Perret <qperret@google.com>
0005  */
0006 
0007 #include <linux/kvm_host.h>
0008 #include <linux/memblock.h>
0009 #include <linux/sort.h>
0010 
0011 #include <asm/kvm_pkvm.h>
0012 
0013 #include "hyp_constants.h"
0014 
/*
 * Host-side views of the hypervisor's (EL2) copy of the memblock map;
 * kvm_nvhe_sym() resolves the nVHE symbol names for EL1 code.
 */
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);

/* Base and size of the physical region reserved for the hypervisor. */
phys_addr_t hyp_mem_base;
phys_addr_t hyp_mem_size;
0020 
0021 static int cmp_hyp_memblock(const void *p1, const void *p2)
0022 {
0023     const struct memblock_region *r1 = p1;
0024     const struct memblock_region *r2 = p2;
0025 
0026     return r1->base < r2->base ? -1 : (r1->base > r2->base);
0027 }
0028 
0029 static void __init sort_memblock_regions(void)
0030 {
0031     sort(hyp_memory,
0032          *hyp_memblock_nr_ptr,
0033          sizeof(struct memblock_region),
0034          cmp_hyp_memblock,
0035          NULL);
0036 }
0037 
0038 static int __init register_memblock_regions(void)
0039 {
0040     struct memblock_region *reg;
0041 
0042     for_each_mem_region(reg) {
0043         if (*hyp_memblock_nr_ptr >= HYP_MEMBLOCK_REGIONS)
0044             return -ENOMEM;
0045 
0046         hyp_memory[*hyp_memblock_nr_ptr] = *reg;
0047         (*hyp_memblock_nr_ptr)++;
0048     }
0049     sort_memblock_regions();
0050 
0051     return 0;
0052 }
0053 
/*
 * Reserve the physical memory the protected-mode hypervisor will own.
 *
 * Called early at boot (__init). Does nothing unless EL2 is available,
 * the kernel is running at EL1 (nVHE), and KVM is in protected mode
 * (KVM_MODE_PROTECTED). On success, hyp_mem_base/hyp_mem_size describe
 * the reserved region; failures are logged and leave hyp_mem_base at 0.
 */
void __init kvm_hyp_reserve(void)
{
    u64 nr_pages, prev, hyp_mem_pages = 0;
    int ret;

    /* pKVM needs nVHE: EL2 present, but kernel not running in it. */
    if (!is_hyp_mode_available() || is_kernel_in_hyp_mode())
        return;

    if (kvm_get_mode() != KVM_MODE_PROTECTED)
        return;

    /* Snapshot the memblock map into the hypervisor's private copy. */
    ret = register_memblock_regions();
    if (ret) {
        /* Discard any partially-filled copy before bailing out. */
        *hyp_memblock_nr_ptr = 0;
        kvm_err("Failed to register hyp memblocks: %d\n", ret);
        return;
    }

    /* Pages for the hyp stage-1 and the host stage-2 page-tables. */
    hyp_mem_pages += hyp_s1_pgtable_pages();
    hyp_mem_pages += host_s2_pgtable_pages();

    /*
     * The hyp_vmemmap needs to be backed by pages, but these pages
     * themselves need to be present in the vmemmap, so compute the number
     * of pages needed by looking for a fixed point.
     */
    nr_pages = 0;
    do {
        prev = nr_pages;
        /* vmemmap entries for the payload plus the previous estimate... */
        nr_pages = hyp_mem_pages + prev;
        nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
                    PAGE_SIZE);
        /* ...plus page-tables to map that vmemmap itself. */
        nr_pages += __hyp_pgtable_max_pages(nr_pages);
    } while (nr_pages != prev);
    hyp_mem_pages += nr_pages;

    /*
     * Try to allocate a PMD-aligned region to reduce TLB pressure once
     * this is unmapped from the host stage-2, and fallback to PAGE_SIZE.
     */
    hyp_mem_size = hyp_mem_pages << PAGE_SHIFT;
    hyp_mem_base = memblock_phys_alloc(ALIGN(hyp_mem_size, PMD_SIZE),
                       PMD_SIZE);
    if (!hyp_mem_base)
        hyp_mem_base = memblock_phys_alloc(hyp_mem_size, PAGE_SIZE);
    else
        /* Grow the size to cover the whole PMD-granule allocation. */
        hyp_mem_size = ALIGN(hyp_mem_size, PMD_SIZE);

    if (!hyp_mem_base) {
        kvm_err("Failed to reserve hyp memory\n");
        return;
    }

    kvm_info("Reserved %lld MiB at 0x%llx\n", hyp_mem_size >> 20,
         hyp_mem_base);
}