/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/stacktrace/nvhe.h>

/*
 * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
 *
 * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
 * allow for guard pages below the stack. Consequently, the fixed offset address
 * translation macros won't work here.
 *
 * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
 * stack base.
 *
 * Returns true on success and updates @addr to its corresponding kernel VA;
 * otherwise returns false.
 */
static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
                                   enum stack_type type)
{
    struct kvm_nvhe_stacktrace_info *stacktrace_info;
    unsigned long hyp_base, kern_base, hyp_offset;

    stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

    switch (type) {
    case STACK_TYPE_HYP:
        kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
        hyp_base = (unsigned long)stacktrace_info->stack_base;
        break;
    case STACK_TYPE_OVERFLOW:
        kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
        hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
        break;
    default:
        return false;
    }

    hyp_offset = *addr - hyp_base;

    *addr = kern_base + hyp_offset;

    return true;
}
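
/*
 * For example (hypothetical addresses): if the hypervisor's private mapping
 * of the stack has hyp_base = 0xffff800008010000 and the host-owned backing
 * page sits at kern_base = 0xffff000002400000, then a HYP stack address of
 * 0xffff800008010f80 yields hyp_offset = 0xf80 and translates to the kernel
 * VA 0xffff000002400f80.
 */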

static bool on_overflow_stack(unsigned long sp, unsigned long size,
                              struct stack_info *info)
{
    struct kvm_nvhe_stacktrace_info *stacktrace_info
                = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
    unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
    unsigned long high = low + OVERFLOW_STACK_SIZE;

    return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}

static bool on_hyp_stack(unsigned long sp, unsigned long size,
                         struct stack_info *info)
{
    struct kvm_nvhe_stacktrace_info *stacktrace_info
                = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
    unsigned long low = (unsigned long)stacktrace_info->stack_base;
    unsigned long high = low + PAGE_SIZE;

    return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}
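
/*
 * Note that the low/high bounds in the two helpers above come from the
 * HYP-side addresses published in the shared stacktrace_info, so the
 * stack-membership checks are made on untranslated HYP (EL2) addresses;
 * conversion to kernel VAs is done separately via kvm_nvhe_stack_kern_va().
 */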

static bool on_accessible_stack(const struct task_struct *tsk,
                                unsigned long sp, unsigned long size,
                                struct stack_info *info)
{
    if (info)
        info->type = STACK_TYPE_UNKNOWN;

    return (on_overflow_stack(sp, size, info) ||
            on_hyp_stack(sp, size, info));
}

static int unwind_next(struct unwind_state *state)
{
    struct stack_info info;

    return unwind_next_common(state, &info, on_accessible_stack,
                              kvm_nvhe_stack_kern_va);
}

static void unwind(struct unwind_state *state,
                   stack_trace_consume_fn consume_entry, void *cookie)
{
    while (1) {
        int ret;

        if (!consume_entry(cookie, state->pc))
            break;
        ret = unwind_next(state);
        if (ret < 0)
            break;
    }
}
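
/*
 * The loop above stops either when the consumer callback declines further
 * entries or when unwind_next() reports an error, e.g. because the next
 * frame record does not lie on one of the accessible HYP stacks.
 */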

/*
 * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
 *
 * @arg    : the hypervisor offset, used for address translation
 * @where  : the program counter corresponding to the stack frame
 */
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
    unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
    unsigned long hyp_offset = (unsigned long)arg;

    /* Mask tags and convert to kern addr */
    where = (where & va_mask) + hyp_offset;
    kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));

    return true;
}
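
/*
 * For example (hypothetical values): with vabits_actual = 48, va_mask keeps
 * bits [47:0] of the HYP PC and clears the upper tag/VA bits; adding the
 * caller-supplied hyp_offset then rebases the address into the kernel's
 * mapping of the hypervisor text so that %pB (plus kaslr_offset()) can
 * symbolize it.
 */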

static void kvm_nvhe_dump_backtrace_start(void)
{
    kvm_err("nVHE call trace:\n");
}

static void kvm_nvhe_dump_backtrace_end(void)
{
    kvm_err("---[ end nVHE call trace ]---\n");
}

/*
 * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * The host can directly access HYP stack pages in non-protected
 * mode, so the unwinding is done directly from EL1. This removes
 * the need for shared buffers between host and hypervisor for
 * the stacktrace.
 */
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
    struct kvm_nvhe_stacktrace_info *stacktrace_info;
    struct unwind_state state;

    stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

    kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);

    kvm_nvhe_dump_backtrace_start();
    unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
    kvm_nvhe_dump_backtrace_end();
}

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
                         pkvm_stacktrace);
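
/*
 * The pkvm_stacktrace buffer is expected to be filled by the EL2 unwinder
 * (the hypervisor side of this feature under hyp/nvhe/) before the host
 * reads it here; the saved trace is null-terminated, which is what the
 * loop below relies on.
 */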

/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dumping of the pKVM HYP backtrace is done by reading the
 * stack addresses from the shared stacktrace buffer, since the
 * host cannot directly access hypervisor memory in protected
 * mode.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
    unsigned long *stacktrace
        = (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
    int i;

    kvm_nvhe_dump_backtrace_start();
    /* The saved stacktrace is terminated by a null entry */
    for (i = 0;
         i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
         i++)
        kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
    kvm_nvhe_dump_backtrace_end();
}
#else   /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
    kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */

/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
    if (is_protected_kvm_enabled())
        pkvm_dump_backtrace(hyp_offset);
    else
        hyp_dump_backtrace(hyp_offset);
}
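
/*
 * kvm_nvhe_dump_backtrace() is typically called from the host's hyp panic
 * handling path (e.g. nvhe_hyp_panic_handler()), which also derives the
 * hyp_offset used for the address translation above.
 */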