/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM nVHE hypervisor stack tracing support.
 *
 * The unwinder implementation depends on the nVHE mode:
 *
 *   1) Non-protected nVHE mode - the host can directly access the
 *      HYP stack pages and unwind the HYP stack in EL1. This saves having
 *      to allocate shared buffers for the host to read the unwound
 *      stacktrace.
 *
 *   2) pKVM (protected nVHE) mode - the host cannot directly access
 *      the HYP memory. The stack is unwound in EL2 and dumped to a shared
 *      buffer where the host can read and print the stacktrace.
 *
 * Copyright (C) 2022 Google LLC
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/stacktrace/nvhe.h>

static struct stack_info stackinfo_get_overflow(void)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
		= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return (struct stack_info) {
		.low = low,
		.high = high,
	};
}

static struct stack_info stackinfo_get_overflow_kern_va(void)
{
	unsigned long low = (unsigned long)this_cpu_ptr_nvhe_sym(overflow_stack);
	unsigned long high = low + OVERFLOW_STACK_SIZE;

	return (struct stack_info) {
		.low = low,
		.high = high,
	};
}

static struct stack_info stackinfo_get_hyp(void)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info
		= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
	unsigned long low = (unsigned long)stacktrace_info->stack_base;
	unsigned long high = low + PAGE_SIZE;

	return (struct stack_info) {
		.low = low,
		.high = high,
	};
}

static struct stack_info stackinfo_get_hyp_kern_va(void)
{
	unsigned long low = (unsigned long)*this_cpu_ptr(&kvm_arm_hyp_stack_page);
	unsigned long high = low + PAGE_SIZE;

	return (struct stack_info) {
		.low = low,
		.high = high,
	};
}

/*
 * kvm_nvhe_stack_kern_va - Convert KVM nVHE HYP stack addresses to kernel VAs
 *
 * The nVHE hypervisor stack is mapped in the flexible 'private' VA range, to
 * allow for guard pages below the stack. Consequently, the fixed offset address
 * translation macros won't work here.
 *
 * The kernel VA is calculated as an offset from the kernel VA of the hypervisor
 * stack base.
 *
 * Returns true on success and updates @addr to its corresponding kernel VA;
 * otherwise returns false.
 */
static bool kvm_nvhe_stack_kern_va(unsigned long *addr, unsigned long size)
{
	struct stack_info stack_hyp, stack_kern;

	stack_hyp = stackinfo_get_hyp();
	stack_kern = stackinfo_get_hyp_kern_va();
	if (stackinfo_on_stack(&stack_hyp, *addr, size))
		goto found;

	stack_hyp = stackinfo_get_overflow();
	stack_kern = stackinfo_get_overflow_kern_va();
	if (stackinfo_on_stack(&stack_hyp, *addr, size))
		goto found;

	return false;

found:
	*addr = *addr - stack_hyp.low + stack_kern.low;
	return true;
}

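/*
 * A worked example of the translation above, with hypothetical addresses:
 * if the HYP stack alias starts at stack_hyp.low == 0xffffffc008001000 and
 * its kernel alias at stack_kern.low == 0xffff800012345000, a frame record
 * at HYP VA 0xffffffc008001f80 maps to kernel VA 0xffff800012345f80; only
 * the base is rebased, the offset into the stack (0xf80) is preserved.
 */
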
/*
 * Convert a KVM nVHE HYP frame record address to a kernel VA
 */
static bool kvm_nvhe_stack_kern_record_va(unsigned long *addr)
{
	return kvm_nvhe_stack_kern_va(addr, 16);
}

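/*
 * The size of 16 above is that of an AArch64 frame record: two 8-byte
 * entries, the saved FP followed by the saved LR.
 */
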
static int unwind_next(struct unwind_state *state)
{
	/*
	 * The FP is in the hypervisor VA space. Convert it to the kernel VA
	 * space so it can be unwound by the regular unwind functions.
	 */
	if (!kvm_nvhe_stack_kern_record_va(&state->fp))
		return -EINVAL;

	return unwind_next_frame_record(state);
}

static void unwind(struct unwind_state *state,
		   stack_trace_consume_fn consume_entry, void *cookie)
{
	while (1) {
		int ret;

		if (!consume_entry(cookie, state->pc))
			break;

		ret = unwind_next(state);
		if (ret < 0)
			break;
	}
}

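/*
 * Note: the walk above stops either when the consumer has seen enough
 * (consume_entry() returns false) or when the next frame record cannot be
 * translated or validated (unwind_next() returns an error).
 */
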
/*
 * kvm_nvhe_dump_backtrace_entry - Symbolize and print an nVHE backtrace entry
 *
 * @arg    : the hypervisor offset, used for address translation
 * @where  : the program counter corresponding to the stack frame
 */
static bool kvm_nvhe_dump_backtrace_entry(void *arg, unsigned long where)
{
	unsigned long va_mask = GENMASK_ULL(vabits_actual - 1, 0);
	unsigned long hyp_offset = (unsigned long)arg;

	/* Mask tags and convert to kern addr */
	where = (where & va_mask) + hyp_offset;
	kvm_err(" [<%016lx>] %pB\n", where, (void *)(where + kaslr_offset()));

	return true;
}

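/*
 * As an illustration (hypothetical values): with vabits_actual == 48,
 * va_mask is 0x0000ffffffffffff, so a tagged HYP PC of 0xf0008000deadbeef
 * is stripped to 0x00008000deadbeef before @hyp_offset rebases it into the
 * kernel's view of the HYP text for %pB symbolization.
 */
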
static void kvm_nvhe_dump_backtrace_start(void)
{
	kvm_err("nVHE call trace:\n");
}

static void kvm_nvhe_dump_backtrace_end(void)
{
	kvm_err("---[ end nVHE call trace ]---\n");
}

/*
 * hyp_dump_backtrace - Dump the non-protected nVHE backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * The host can directly access HYP stack pages in non-protected
 * mode, so the unwinding is done directly from EL1. This removes
 * the need for shared buffers between host and hypervisor for
 * the stacktrace.
 */
static void hyp_dump_backtrace(unsigned long hyp_offset)
{
	struct kvm_nvhe_stacktrace_info *stacktrace_info;
	struct stack_info stacks[] = {
		stackinfo_get_overflow_kern_va(),
		stackinfo_get_hyp_kern_va(),
	};
	struct unwind_state state = {
		.stacks = stacks,
		.nr_stacks = ARRAY_SIZE(stacks),
	};

	stacktrace_info = this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);

	kvm_nvhe_unwind_init(&state, stacktrace_info->fp, stacktrace_info->pc);

	kvm_nvhe_dump_backtrace_start();
	unwind(&state, kvm_nvhe_dump_backtrace_entry, (void *)hyp_offset);
	kvm_nvhe_dump_backtrace_end();
}

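/*
 * The resulting dump has the following shape (addresses and symbols are
 * hypothetical):
 *
 *   nVHE call trace:
 *    [<ffff800008f9b1e0>] __kvm_nvhe_hyp_panic+0xa4/0xf8
 *    [<ffff800008f9b0a8>] __kvm_nvhe_handle_trap+0x78/0x98
 *   ---[ end nVHE call trace ]---
 */
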
#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
DECLARE_KVM_NVHE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)],
			 pkvm_stacktrace);

/*
 * pkvm_dump_backtrace - Dump the protected nVHE HYP backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 *
 * Dumping of the pKVM HYP backtrace is done by reading the
 * stack addresses from the shared stacktrace buffer, since the
 * host cannot directly access hypervisor memory in protected
 * mode.
 */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	unsigned long *stacktrace
		= (unsigned long *) this_cpu_ptr_nvhe_sym(pkvm_stacktrace);
	int i;

	kvm_nvhe_dump_backtrace_start();
	/* The saved stacktrace is terminated by a null entry */
	for (i = 0;
	     i < ARRAY_SIZE(kvm_nvhe_sym(pkvm_stacktrace)) && stacktrace[i];
	     i++)
		kvm_nvhe_dump_backtrace_entry((void *)hyp_offset, stacktrace[i]);
	kvm_nvhe_dump_backtrace_end();
}

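/*
 * Hypothetical layout of the shared buffer for a three-entry trace:
 * { pc0, pc1, pc2, 0, ... } - the first zero entry terminates the walk
 * above, and a completely full buffer is bounded by ARRAY_SIZE() instead.
 */
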
#else	/* !CONFIG_PROTECTED_NVHE_STACKTRACE */
static void pkvm_dump_backtrace(unsigned long hyp_offset)
{
	kvm_err("Cannot dump pKVM nVHE stacktrace: !CONFIG_PROTECTED_NVHE_STACKTRACE\n");
}
#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */

/*
 * kvm_nvhe_dump_backtrace - Dump KVM nVHE hypervisor backtrace.
 *
 * @hyp_offset: hypervisor offset, used for address translation.
 */
void kvm_nvhe_dump_backtrace(unsigned long hyp_offset)
{
	if (is_protected_kvm_enabled())
		pkvm_dump_backtrace(hyp_offset);
	else
		hyp_dump_backtrace(hyp_offset);
}