/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

/*
 * Spill the AAPCS64 caller-saved registers x2-x17 to the stack.
 * x0 and x1 were already saved by the vector entry preamble, so the
 * resulting frame (low to high) is: x16..x17, x14..x15, ..., x2..x3,
 * then the earlier x0..x1 pair. Paired with restore_caller_saved_regs_vect.
 */
.macro save_caller_saved_regs_vect
	/* x0 and x1 were saved in the vector entry */
	sub	sp, sp, #(8 * 16)
	stp	x16, x17, [sp, #(0 * 16)]
	stp	x14, x15, [sp, #(1 * 16)]
	stp	x12, x13, [sp, #(2 * 16)]
	stp	x10, x11, [sp, #(3 * 16)]
	stp	x8, x9,   [sp, #(4 * 16)]
	stp	x6, x7,   [sp, #(5 * 16)]
	stp	x4, x5,   [sp, #(6 * 16)]
	stp	x2, x3,   [sp, #(7 * 16)]
.endm

/*
 * Undo save_caller_saved_regs_vect: reload x2-x17 from the frame laid
 * out by the save macro, then also pop the x0/x1 pair that the vector
 * entry preamble pushed, releasing all nine register pairs at once.
 */
.macro restore_caller_saved_regs_vect
	ldp	x16, x17, [sp, #(0 * 16)]
	ldp	x14, x15, [sp, #(1 * 16)]
	ldp	x12, x13, [sp, #(2 * 16)]
	ldp	x10, x11, [sp, #(3 * 16)]
	ldp	x8, x9,   [sp, #(4 * 16)]
	ldp	x6, x7,   [sp, #(5 * 16)]
	ldp	x4, x5,   [sp, #(6 * 16)]
	ldp	x2, x3,   [sp, #(7 * 16)]
	ldp	x0, x1,   [sp, #(8 * 16)]
	add	sp, sp, #(9 * 16)
.endm

	.text

/*
 * Perform a host-requested hypercall at EL2: x0 holds the function
 * pointer, x1-x3 hold its arguments. lr is preserved across the call
 * via the stack (16 bytes to keep sp 16-byte aligned).
 */
.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

el1_sync:				// Guest trapped into EL2

	/* Classify the trap: only HVC (64- or 32-bit) is handled here. */
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

#ifdef __KVM_NVHE_HYPERVISOR__
	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16	// undo the vector entry preamble

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call: translate the function pointer to its
	 * HYP VA, then dispatch via do_el2_call.
	 */
	kern_hyp_va	x0
	do_el2_call

	eret
	sb
#endif /* __KVM_NVHE_HYPERVISOR__ */

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap			// not a workaround call: normal trap path

#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end				// patched out when WA2 handling is enabled
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

wa_epilogue:
	mov	x0, xzr				// SMCCC success return value
	add	sp, sp, #16			// drop the x0/x1 pair saved at vector entry
	eret
	sb

el1_trap:
	/*
	 * Generic guest trap: hand the vcpu pointer (x1) and the
	 * ARM_EXCEPTION_TRAP code (x0) to the common guest-exit path.
	 */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	/* IRQ while running the guest: exit with ARM_EXCEPTION_IRQ. */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	/* SError while running the guest: exit with ARM_EXCEPTION_EL1_SERROR. */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_sync:
	/* Check for illegal exception return */
	mrs	x0, spsr_el2
	tbnz	x0, #20, 1f		// SPSR_EL2.IL set: attempt recovery below

	/*
	 * Otherwise this is an unexpected synchronous exception taken at
	 * EL2 itself: report it (saving/restoring the caller-saved state
	 * around the C call) and resume where we were.
	 */
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!
	bl	kvm_unexpected_el2_exception
	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb				// speculation barrier, as after every eret here

1:
	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit

el2_error:
	/*
	 * Unexpected SError taken at EL2: report it via
	 * kvm_unexpected_el2_exception (preserving caller-saved state and
	 * the frame registers around the C call) and resume.
	 */
	save_caller_saved_regs_vect
	stp     x29, x30, [sp, #-16]!

	bl	kvm_unexpected_el2_exception

	ldp     x29, x30, [sp], #16
	restore_caller_saved_regs_vect

	eret
	sb

#ifdef __KVM_NVHE_HYPERVISOR__
SYM_FUNC_START(__hyp_do_panic)
	/*
	 * Leave EL2 for the kernel's panic(): build an SPSR for EL1h with
	 * all of DAIF masked, point ELR at panic, and exception-return.
	 */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
SYM_FUNC_END(__hyp_do_panic)
#endif

SYM_CODE_START(__hyp_panic)
	/* Load the host context pointer and hand off to the C-level handler. */
	get_host_ctxt x0, x1
	b	hyp_panic
SYM_CODE_END(__hyp_panic)

/*
 * Define a named stub for a vector that should never fire; it simply
 * branches to \target (__hyp_panic by default).
 */
.macro invalid_vector	label, target = __hyp_panic
	.align	2
SYM_CODE_START(\label)
	b \target
SYM_CODE_END(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	/* Flush the literal pool (e.g. =__kvm_handle_stub_hvc above) here. */
	.ltorg

	/* The vector table that follows must be 2^11 (2KB) aligned. */
	.align 11

/*
 * Build-time assertion that the code between \start and \end is exactly
 * KVM_VECTOR_PREAMBLE bytes long.
 */
.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

/*
 * Emit one 128-byte vector slot whose preamble (error barrier + x0/x1
 * spill) is length-checked, then branch to the real handler.
 */
.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

/*
 * Emit one 128-byte vector slot for an invalid vector: branch straight
 * to \target (no preamble work needed), padded with a nop so the
 * preamble length check still holds. The code after 662 is only reached
 * if something jumps past the preamble; it unwinds the x0/x1 spill
 * before branching to \target.
 */
.macro invalid_vect target
	.align 7
661:
	b	\target
	nop
662:
	ldp	x0, x1, [sp], #16
	b	\target

check_preamble_length 661b, 662b
.endm

/* The EL2 exception vector table: 16 slots of 128 bytes each. */
SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
/*
 * One 128-byte hardened vector slot: an error barrier, padding nops,
 * then a branch sequence that kvm_patch_vector_branch() may rewrite
 * at runtime (see the comment below). The 0: label referenced by the
 * branch is defined in generate_vectors.
 */
.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where:
 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

/*
 * Emit one full vector table: 16 hyp_ventry slots of 128 bytes each
 * (SZ_2K total). The .org directive fails the build if the expansion
 * ever grows past that size.
 */
.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm

	.align	11
/* BP_HARDEN_EL2_SLOTS hardened copies of the vector table, 2KB each. */
SYM_CODE_START(__bp_harden_hyp_vecs)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b			// build-time check of the total size
SYM_CODE_END(__bp_harden_hyp_vecs)
#endif