// SPDX-License-Identifier: GPL-2.0

#include <linux/frame.h>
#include <linux/percpu.h>

#include <asm/debugreg.h>
#include <asm/mmu_context.h>

#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
#include "pmu.h"
#include "trace.h"
#include "x86.h"

static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);

static bool __read_mostly nested_early_check = 0;
module_param(nested_early_check, bool, S_IRUGO);

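/*
 * Wrapper for VMX consistency checks: evaluates to the result of
 * @consistency_check and, when the check fails, emits a tracepoint that
 * carries the stringified expression, e.g.
 *	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
 *		return -EINVAL;
 */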
#define CC(consistency_check)						\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);	\
	failed;								\
})

/*
 * Hyper-V requires all of these, so mark them as supported even though
 * they are just treated the same as all-context.
 */
#define VMX_VPID_EXTENT_SUPPORTED_MASK		\
	(VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT |	\
	VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT |	\
	VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)

#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5

enum {
	VMX_VMREAD_BITMAP,
	VMX_VMWRITE_BITMAP,
	VMX_BITMAP_NR
};
static unsigned long *vmx_bitmap[VMX_BITMAP_NR];

#define vmx_vmread_bitmap                    (vmx_bitmap[VMX_VMREAD_BITMAP])
#define vmx_vmwrite_bitmap                   (vmx_bitmap[VMX_VMWRITE_BITMAP])

struct shadow_vmcs_field {
	u16	encoding;
	u16	offset;
};
static struct shadow_vmcs_field shadow_read_only_fields[] = {
#define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_only_fields =
	ARRAY_SIZE(shadow_read_only_fields);

static struct shadow_vmcs_field shadow_read_write_fields[] = {
#define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
#include "vmcs_shadow_fields.h"
};
static int max_shadow_read_write_fields =
	ARRAY_SIZE(shadow_read_write_fields);

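/*
 * Build the tables of VMCS12 fields that can be shadowed in hardware and
 * clear their bits in the VMREAD/VMWRITE bitmaps.  Fields the CPU cannot
 * shadow (e.g. GUEST_PML_INDEX without PML support) are skipped.  High-half
 * encodings of 64-bit fields are dropped on 64-bit hosts and redirected to
 * the upper 32 bits of the vmcs12 member on 32-bit hosts.
 */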
static void init_vmcs_shadow_fields(void)
{
	int i, j;

	memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
	memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);

	for (i = j = 0; i < max_shadow_read_only_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_only_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_only_fields ||
		     shadow_read_only_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_only_field %x\n",
			       field + 1);

		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_only_fields[j++] = entry;
	}
	max_shadow_read_only_fields = j;

	for (i = j = 0; i < max_shadow_read_write_fields; i++) {
		struct shadow_vmcs_field entry = shadow_read_write_fields[i];
		u16 field = entry.encoding;

		if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
		    (i + 1 == max_shadow_read_write_fields ||
		     shadow_read_write_fields[i + 1].encoding != field + 1))
			pr_err("Missing field from shadow_read_write_field %x\n",
			       field + 1);

		WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
			  field <= GUEST_TR_AR_BYTES,
			  "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");

		/*
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * on bare metal.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

		clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
	}
	max_shadow_read_write_fields = j;
}

/*
 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
 */
static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_CF);
	return kvm_skip_emulated_instruction(vcpu);
}

static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
				u32 vm_instruction_error)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * failValid writes the error number to the current VMCS, which
	 * can't be done if there isn't a current VMCS.
	 */
	if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
		return nested_vmx_failInvalid(vcpu);

	vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
			& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
			    X86_EFLAGS_SF | X86_EFLAGS_OF))
			| X86_EFLAGS_ZF);
	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
	/*
	 * We don't need to force a shadow sync because
	 * VM_INSTRUCTION_ERROR is not shadowed
	 */
	return kvm_skip_emulated_instruction(vcpu);
}

static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
	/* TODO: don't simply reset the guest here. */
	kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
	pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
}

static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
	return fixed_bits_valid(control, low, high);
}

static inline u64 vmx_control_msr(u32 low, u32 high)
{
	return low | ((u64)high << 32);
}

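/*
 * Stop shadowing vmcs12: clear the shadow-VMCS execution control, reset
 * the VMCS link pointer and drop any pending vmcs12 sync.
 */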
static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
	secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
	vmcs_write64(VMCS_LINK_POINTER, -1ull);
	vmx->nested.need_vmcs12_to_shadow_sync = false;
}

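/* Unmap the enlightened VMCS and forget its GPA, if one is in use. */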
static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.hv_evmcs)
		return;

	kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
	vmx->nested.hv_evmcs_vmptr = 0;
	vmx->nested.hv_evmcs = NULL;
}

/*
 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
 * just stops using VMX.
 */
static void free_nested(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
		return;

	kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);

	vmx->nested.vmxon = false;
	vmx->nested.smm.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	vmx->nested.posted_intr_nv = -1;
	vmx->nested.current_vmptr = -1ull;
	if (enable_shadow_vmcs) {
		vmx_disable_shadow_vmcs(vmx);
		vmcs_clear(vmx->vmcs01.shadow_vmcs);
		free_vmcs(vmx->vmcs01.shadow_vmcs);
		vmx->vmcs01.shadow_vmcs = NULL;
	}
	kfree(vmx->nested.cached_vmcs12);
	vmx->nested.cached_vmcs12 = NULL;
	kfree(vmx->nested.cached_shadow_vmcs12);
	vmx->nested.cached_shadow_vmcs12 = NULL;
	/* Unpin physical memory we referred to in the vmcs02 */
	if (vmx->nested.apic_access_page) {
		kvm_release_page_clean(vmx->nested.apic_access_page);
		vmx->nested.apic_access_page = NULL;
	}
	kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
	kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
	vmx->nested.pi_desc = NULL;

	kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);

	nested_release_evmcs(vcpu);

	free_loaded_vmcs(&vmx->nested.vmcs02);
}

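/*
 * After switching the loaded VMCS, copy the host state that was cached for
 * the previous VMCS into the new one so its host-state fields stay accurate.
 * Nothing to do if guest state was never loaded on this vCPU.
 */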
static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
				     struct loaded_vmcs *prev)
{
	struct vmcs_host_state *dest, *src;

	if (unlikely(!vmx->guest_state_loaded))
		return;

	src = &prev->host_state;
	dest = &vmx->loaded_vmcs->host_state;

	vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
	dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
	dest->ds_sel = src->ds_sel;
	dest->es_sel = src->es_sel;
#endif
}

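/*
 * Make @vmcs the vCPU's current loaded VMCS: load it on this CPU, carry
 * over the cached host state and invalidate the register caches.
 */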
static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct loaded_vmcs *prev;
	int cpu;

	if (vmx->loaded_vmcs == vmcs)
		return;

	cpu = get_cpu();
	prev = vmx->loaded_vmcs;
	vmx->loaded_vmcs = vmcs;
	vmx_vcpu_load_vmcs(vcpu, cpu);
	vmx_sync_vmcs_host_state(vmx, prev);
	put_cpu();

	vmx_register_cache_reset(vcpu);
}

/*
 * Ensure that the current vmcs of the logical processor is the
 * vmcs01 of the vcpu before calling free_nested().
 */
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	vmx_leave_nested(vcpu);
	vmx_switch_vmcs(vcpu, &to_vmx(vcpu)->vmcs01);
	free_nested(vcpu);
	vcpu_put(vcpu);
}

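/*
 * Reflect an EPT fault encountered while running L2 back to L1 as the
 * appropriate nested VM-exit: PML_FULL, EPT_MISCONFIG or EPT_VIOLATION.
 */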
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
		struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u32 vm_exit_reason;
	unsigned long exit_qualification = vcpu->arch.exit_qualification;

	if (vmx->nested.pml_full) {
		vm_exit_reason = EXIT_REASON_PML_FULL;
		vmx->nested.pml_full = false;
		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
	} else if (fault->error_code & PFERR_RSVD_MASK)
		vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
	else
		vm_exit_reason = EXIT_REASON_EPT_VIOLATION;

	nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
	vmcs12->guest_physical_address = fault->address;
}

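/*
 * Switch the vCPU to the shadow-EPT MMU used while L2 runs with EPT enabled
 * in vmcs12; L2's own page tables are walked through vcpu->arch.nested_mmu.
 */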
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
	WARN_ON(mmu_is_nested(vcpu));

	vcpu->arch.mmu = &vcpu->arch.guest_mmu;
	kvm_init_shadow_ept_mmu(vcpu,
			to_vmx(vcpu)->nested.msrs.ept_caps &
			VMX_EPT_EXECUTE_ONLY_BIT,
			nested_ept_ad_enabled(vcpu),
			nested_ept_get_eptp(vcpu));
	vcpu->arch.mmu->get_guest_pgd     = nested_ept_get_eptp;
	vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
	vcpu->arch.mmu->get_pdptr         = kvm_pdptr_read;

	vcpu->arch.walk_mmu              = &vcpu->arch.nested_mmu;
}

static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
	vcpu->arch.mmu = &vcpu->arch.root_mmu;
	vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
}

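/*
 * Return true if a page fault with @error_code must cause a VM-exit to L1,
 * per the exception bitmap and the #PF error-code mask/match in vmcs12.
 */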
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
					    u16 error_code)
{
	bool inequality, bit;

	bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
	return inequality ^ bit;
}

/*
 * KVM needs to inject page faults it received into the guest. This function
 * checks whether, for a nested guest, the fault should be injected into L1
 * or L2.
 */
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	unsigned int nr = vcpu->arch.exception.nr;
	bool has_payload = vcpu->arch.exception.has_payload;
	unsigned long payload = vcpu->arch.exception.payload;

	if (nr == PF_VECTOR) {
		if (vcpu->arch.exception.nested_apf) {
			*exit_qual = vcpu->arch.apf.nested_apf_token;
			return 1;
		}
		if (nested_vmx_is_page_fault_vmexit(vmcs12,
						    vcpu->arch.exception.error_code)) {
			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
			return 1;
		}
	} else if (vmcs12->exception_bitmap & (1u << nr)) {
		if (nr == DB_VECTOR) {
			if (!has_payload) {
				payload = vcpu->arch.dr6;
				payload &= ~(DR6_FIXED_1 | DR6_BT);
				payload ^= DR6_RTM;
			}
			*exit_qual = payload;
		} else
			*exit_qual = 0;
		return 1;
	}

	return 0;
}

static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
		struct x86_exception *fault)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);

	WARN_ON(!is_guest_mode(vcpu));

	if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
		!to_vmx(vcpu)->nested.nested_run_pending) {
		vmcs12->vm_exit_intr_error_code = fault->error_code;
		nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
				  PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
				  INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
				  fault->address);
	} else {
		kvm_inject_page_fault(vcpu, fault);
	}
}

static bool page_address_valid(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	return PAGE_ALIGNED(gpa) && !(gpa >> cpuid_maxphyaddr(vcpu));
}

static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
	    CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
						struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
		return -EINVAL;

	return 0;
}

/*
 * Check if MSR is intercepted for L01 MSR bitmap.
 */
static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}

/*
 * If an MSR is allowed by L0, check whether it is also allowed by L1; the
 * corresponding intercept bit is cleared only if both L0 and L1 allow it.
 */
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
					       unsigned long *msr_bitmap_nested,
					       u32 msr, int type)
{
	int f = sizeof(unsigned long);

	/*
	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
	 * have the write-low and read-high bitmap offsets the wrong way round.
	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
			/* read-low */
			__clear_bit(msr, msr_bitmap_nested + 0x000 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
			/* write-low */
			__clear_bit(msr, msr_bitmap_nested + 0x800 / f);

	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		if (type & MSR_TYPE_R &&
		   !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
			/* read-high */
			__clear_bit(msr, msr_bitmap_nested + 0x400 / f);

		if (type & MSR_TYPE_W &&
		   !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
			/* write-high */
			__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);

	}
}

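/*
 * Set both the read and write intercept bits for the whole x2APIC MSR
 * range (0x800 - 0x8ff) in @msr_bitmap.
 */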
static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
{
	int msr;

	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0;
		msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
	}
}

/*
 * Merge L0's and L1's MSR bitmaps; return false to indicate that the
 * hardware MSR bitmap should not be used.
 */
static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	int msr;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
	struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;

	/* Nothing to do if the MSR bitmap is not in use.  */
	if (!cpu_has_vmx_msr_bitmap() ||
	    !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
		return false;

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
		return false;

	msr_bitmap_l1 = (unsigned long *)map->hva;

	/*
	 * To keep the control flow simple, pay eight 8-byte writes (sixteen
	 * 4-byte writes on 32-bit systems) up front to enable intercepts for
	 * the x2APIC MSR range and selectively disable them below.
	 */
	enable_x2apic_msr_intercepts(msr_bitmap_l0);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12)) {
			/*
			 * L0 need not intercept reads for MSRs between 0x800
			 * and 0x8ff, it just lets the processor take the value
			 * from the virtual-APIC page; take those 256 bits
			 * directly from the L1 bitmap.
			 */
			for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
				unsigned word = msr / BITS_PER_LONG;

				msr_bitmap_l0[word] = msr_bitmap_l1[word];
			}
		}

		nested_vmx_disable_intercept_for_msr(
			msr_bitmap_l1, msr_bitmap_l0,
			X2APIC_MSR(APIC_TASKPRI),
			MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_EOI),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				X2APIC_MSR(APIC_SELF_IPI),
				MSR_TYPE_W);
		}
	}

	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_FS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_GS_BASE, MSR_TYPE_RW);

	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);

	/*
	 * Checking the L0->L1 bitmap verifies two things:
	 *
	 * 1. L0 gave L1 permission to actually pass through the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. L1 (or one of its L2s) has actually used the MSR. This avoids
	 *    unnecessary merging of the bitmaps if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    the MSR.
	 */
	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_SPEC_CTRL,
					MSR_TYPE_R | MSR_TYPE_W);

	if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
		nested_vmx_disable_intercept_for_msr(
					msr_bitmap_l1, msr_bitmap_l0,
					MSR_IA32_PRED_CMD,
					MSR_TYPE_W);

	kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);

	return true;
}

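/*
 * Copy the shadow vmcs12 that L1 keeps at vmcs12->vmcs_link_pointer from
 * guest memory into KVM's cached copy.
 */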
static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
				       struct vmcs12 *vmcs12)
{
	struct kvm_host_map map;
	struct vmcs12 *shadow;

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	shadow = get_shadow_vmcs12(vcpu);

	if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
		return;

	memcpy(shadow, map.hva, VMCS12_SIZE);
	kvm_vcpu_unmap(vcpu, &map, false);
}

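/*
 * Write KVM's cached shadow vmcs12 back to the guest memory located at
 * vmcs12->vmcs_link_pointer.
 */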
static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
					      struct vmcs12 *vmcs12)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
	    vmcs12->vmcs_link_pointer == -1ull)
		return;

	kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
			get_shadow_vmcs12(vcpu), VMCS12_SIZE);
}

/*
 * In nested virtualization, check if L1 has set
 * VM_EXIT_ACK_INTR_ON_EXIT
 */
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
	return get_vmcs12(vcpu)->vm_exit_controls &
		VM_EXIT_ACK_INTR_ON_EXIT;
}

static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
	return nested_cpu_has_nmi_exiting(get_vmcs12(vcpu));
}

static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
					  struct vmcs12 *vmcs12)
{
	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
	    CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
		return -EINVAL;
	else
		return 0;
}

static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
					   struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	    !nested_cpu_has_apic_reg_virt(vmcs12) &&
	    !nested_cpu_has_vid(vmcs12) &&
	    !nested_cpu_has_posted_intr(vmcs12))
		return 0;

	/*
	 * If virtualize x2apic mode is enabled,
	 * virtualize apic access must be disabled.
	 */
	if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
	       nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
		return -EINVAL;

	/*
	 * If virtual interrupt delivery is enabled,
	 * we must exit on external interrupts.
	 */
	if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
		return -EINVAL;

	/*
	 * Bits 15:8 of posted_intr_nv must be zero; the descriptor address
	 * has already been checked in nested_get_vmcs12_pages.
	 *
	 * Bits 5:0 of posted_intr_desc_addr must be zero.
	 */
	if (nested_cpu_has_posted_intr(vmcs12) &&
	   (CC(!nested_cpu_has_vid(vmcs12)) ||
	    CC(!nested_exit_intr_ack_set(vcpu)) ||
	    CC((vmcs12->posted_intr_nv & 0xff00)) ||
	    CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
	    CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
		return -EINVAL;

	/* tpr shadow is needed by all apicv features. */
	if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
		return -EINVAL;

	return 0;
}

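/*
 * Common check for a VM-entry/VM-exit MSR-load/store area: the address must
 * be 16-byte aligned and the whole list must fit below the guest's maximum
 * physical address.
 */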
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	int maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
						     struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_load_count,
					   vmcs12->vm_exit_msr_load_addr)) ||
	    CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_exit_msr_store_count,
					   vmcs12->vm_exit_msr_store_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
						       struct vmcs12 *vmcs12)
{
	if (CC(nested_vmx_check_msr_switch(vcpu,
					   vmcs12->vm_entry_msr_load_count,
					   vmcs12->vm_entry_msr_load_addr)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
					 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_pml(vmcs12))
		return 0;

	if (CC(!nested_cpu_has_ept(vmcs12)) ||
	    CC(!page_address_valid(vcpu, vmcs12->pml_address)))
		return -EINVAL;

	return 0;
}

static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
							struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
							 struct vmcs12 *vmcs12)
{
	if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
	       !nested_cpu_has_ept(vmcs12)))
		return -EINVAL;
	return 0;
}

static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
						 struct vmcs12 *vmcs12)
{
	if (!nested_cpu_has_shadow_vmcs(vmcs12))
		return 0;

	if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
	    CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
		return -EINVAL;

	return 0;
}

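/* Checks that apply to both the VM-entry load and VM-exit store MSR lists. */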
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
				       struct vmx_msr_entry *e)
{
	/* x2APIC MSR accesses are not allowed */
	if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
		return -EINVAL;
	if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
	    CC(e->index == MSR_IA32_UCODE_REV))
		return -EINVAL;
	if (CC(e->reserved != 0))
		return -EINVAL;
	return 0;
}

static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
				     struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_FS_BASE) ||
	    CC(e->index == MSR_GS_BASE) ||
	    CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
				      struct vmx_msr_entry *e)
{
	if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
	    nested_vmx_msr_check_common(vcpu, e))
		return -EINVAL;
	return 0;
}

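/*
 * Maximum number of MSRs in an atomic switch MSR list, as advertised to L1
 * through the IA32_VMX_MISC MSR.
 */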
static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
				       vmx->nested.msrs.misc_high);

	return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
}

/*
 * Load the guest's/host's MSRs at nested entry/exit.
 * Return 0 on success, or the (1-based) index of the failing entry.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To stay as close to hardware behavior as
 * possible, process all valid entries before failing rather than prechecking
 * for a capacity violation.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			goto fail;

		if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
					&e, sizeof(e))) {
			pr_debug_ratelimited(
				"%s cannot read MSR entry (%u, 0x%08llx)\n",
				__func__, i, gpa + i * sizeof(e));
			goto fail;
		}
		if (nested_vmx_load_msr_check(vcpu, &e)) {
			pr_debug_ratelimited(
				"%s check failed (%u, 0x%x, 0x%x)\n",
				__func__, i, e.index, e.reserved);
			goto fail;
		}
		if (kvm_set_msr(vcpu, e.index, e.value)) {
			pr_debug_ratelimited(
				"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
				__func__, i, e.index, e.value);
			goto fail;
		}
	}
	return 0;
fail:
	return i + 1;
}

static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
					    u32 msr_index,
					    u64 *data)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	/*
	 * If the L0 hypervisor stored a more accurate value for the TSC that
	 * does not include the time taken for emulation of the L2->L1
	 * VM-exit in L0, use the more accurate value.
	 */
	if (msr_index == MSR_IA32_TSC) {
		int index = vmx_find_msr_index(&vmx->msr_autostore.guest,
					       MSR_IA32_TSC);

		if (index >= 0) {
			u64 val = vmx->msr_autostore.guest.val[index].value;

			*data = kvm_read_l1_tsc(vcpu, val);
			return true;
		}
	}

	if (kvm_get_msr(vcpu, msr_index, data)) {
		pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
			msr_index);
		return false;
	}
	return true;
}

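/*
 * Read the index and reserved words of one vmx_msr_entry from the guest's
 * VM-exit MSR-store area and validate them.
 */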
static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
				     struct vmx_msr_entry *e)
{
	if (kvm_vcpu_read_guest(vcpu,
				gpa + i * sizeof(*e),
				e, 2 * sizeof(u32))) {
		pr_debug_ratelimited(
			"%s cannot read MSR entry (%u, 0x%08llx)\n",
			__func__, i, gpa + i * sizeof(*e));
		return false;
	}
	if (nested_vmx_store_msr_check(vcpu, e)) {
		pr_debug_ratelimited(
			"%s check failed (%u, 0x%x, 0x%x)\n",
			__func__, i, e->index, e->reserved);
		return false;
	}
	return true;
}

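/*
 * Walk the VM-exit MSR-store area: validate each entry and fetch the
 * current value of the listed MSR so it can be written back to the guest.
 */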
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
	u64 data;
	u32 i;
	struct vmx_msr_entry e;
	u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);

	for (i = 0; i < count; i++) {
		if (unlikely(i >= max_msr_list_size))
			return -EINVAL;

		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return -EINVAL;

		if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
			return -EINVAL;
