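/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder behaviour depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode: the host can access the HYP stack pages
 *      directly, so the HYP stack is unwound from EL1 without any shared
 *      buffers.
 *
 *   2) Protected nVHE mode (pKVM): the host cannot access HYP memory, so
 *      the stacktrace is saved to a shared per-CPU buffer which the host
 *      reads and prints.
 */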
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/stacktrace/nvhe.h>

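/*
 * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
 *
 * The nVHE hypervisor stack is mapped in the flexible 'private' VA range,
 * to allow for guard pages below the stack. Consequently, the fixed offset
 * address translation macros won't work here.
 *
 * The kernel VA is calculated as an offset from the kernel VA of the
 * hypervisor stack base.
 *
 * Returns true on success and updates @addr to its corresponding kernel VA;
 * otherwise returns false.
 */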
static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
				   enum stack_type type)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	unsigned long hyp_base, kern_base, hyp_offset;

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	switch (type) {
	case STACK_TYPE_HYP:
		kern_base = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
		hyp_base = (unsigned long)stacktrace_info->stack_base;
		break;
	case STACK_TYPE_OVERFLOW:
		kern_base = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
		hyp_base = (unsigned long)stacktrace_info->overflow_stack_base;
		break;
	default:
		return false;
	}

	hyp_offset = *addr - hyp_base;

	*addr = kern_base + hyp_offset;

	return true;
}

static bool on_overflow_stack(unsigned long sp, unsigned long size,
			      struct stack_info *info)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}

static bool on_hyp_stack(unsigned long sp, unsigned long size,
			 struct stack_info *info)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
				= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->stack_base;
	unsigned long high = low + PAGE_SIZE;

	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
}

static bool on_accessible_stack(const struct task_struct *tsk,
				unsigned long sp, unsigned long size,
				struct stack_info *info)
{
	if (info)
		info->type = STACK_TYPE_UNKNOWN;

	return (on_overflow_stack(sp, size, info) ||
		on_hyp_stack(sp, size, info));
}

static int unwind_next(struct unwind_state *state)
{
	struct stack_info info;

	return unwind_next_common(state, &info, on_accessible_stack,
				  kvm_nvhe_stack_kern_va);
}

static void unwind(struct unwind_state *state,
		   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;
		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}

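/*
 * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
 *
 * @arg   : the hypervisor offset, used for address translation
 * @where : the program counter corresponding to the stack frame
 */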
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
	unsigned long hyp_offset = (unsigned long)arg;

	/* Mask tags and convert to kern addr */
	where = (where & va_mask) + hyp_offset;
	kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));

	return true;
}

static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}

static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}

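/*
 * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
 *
 * @hyp_offset : hypervisor offset, used for address translation.
 *
 * The host can directly access HYP stack pages in non-protected
 * mode, so the unwinding is done directly from EL1. This removes
 * the need for shared buffers between host and hypervisor for
 * the stacktrace.
 */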
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	struct unwind_state state;

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);

	kvm_nvhe_dump_backtrace_start();
	unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
	kvm_nvhe_dump_backtrace_end();
}

#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
			 pkvm_stacktrace);

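/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset : hypervisor offset, used for address translation.
 *
 * Dumping of the pKVM HYP backtrace is done by reading the
 * stack addresses from the shared stacktrace buffer, since the
 * host cannot directly access hypervisor memory in protected
 * mode.
 */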
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	unsigned long *stacktrace
		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
	int i;

	kvm_nvhe_dump_backtrace_start();
	/* The saved stacktrace is terminated by a null entry */
	for (i = 0;
	     i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
	     i++)
		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
	kvm_nvhe_dump_backtrace_end();
}
#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif	/* CONFIG_PROTECTED_NVHE_STACKTRACE */

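/*
 * kvm_nvhe_dump_backtrace - Dump the KVM nVHE HYP backtrace.
 *
 * @hyp_offset : hypervisor offset, used for address translation.
 */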
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
	if (is_protected_kvm_enabled())
		pkvm_dump_backtrace(hyp_offset);
	else
		hyp_dump_backtrace(hyp_offset);
}