// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <linux/frame.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/smt.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/trace_events.h>

#include <asm/apic.h>
#include <asm/asm.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/io.h>
#include <asm/irq_remapping.h>
#include <asm/kexec.h>
#include <asm/perf_event.h>
#include <asm/mce.h>
#include <asm/mmu_context.h>
#include <asm/mshyperv.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
#include <asm/virtext.h>
#include <asm/vmx.h>

#include "capabilities.h"
#include "cpuid.h"
#include "evmcs.h"
#include "irq.h"
#include "kvm_cache_regs.h"
#include "lapic.h"
#include "mmu.h"
#include "nested.h"
#include "ops.h"
#include "pmu.h"
#include "trace.h"
#include "vmcs.h"
#include "vmcs12.h"
#include "vmx.h"
#include "x86.h"

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id vmx_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_VMX, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
#endif

bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);

static bool __read_mostly enable_vnmi = 1;
module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);

bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);

bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);

bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
			enable_unrestricted_guest, bool, S_IRUGO);

bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);

static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);

static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);

bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);

/*
 * If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be hypervisors for their own guests. If nested=0, guests may not
 * use VMX instructions.
 */
static bool __read_mostly nested = 1;
module_param(nested, bool, S_IRUGO);

bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);

static bool __read_mostly dump_invalid_vmcs = 0;
module_param(dump_invalid_vmcs, bool, 0644);

#define MSR_BITMAP_MODE_X2APIC		1
#define MSR_BITMAP_MODE_X2APIC_APICV	2

#define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL

/* Guest_tsc -> host_tsc conversion requires 64-bit division.  */
static int __read_mostly cpu_preemption_timer_multi;
static bool __read_mostly enable_preemption_timer = 1;
#ifdef CONFIG_X86_64
module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO);
#endif

#define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE
#define KVM_VM_CR0_ALWAYS_ON				\
	(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | 	\
	 X86_CR0_WP | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS				      \
	(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
	 | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_TSD)

#define KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR4_VMXE
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

#define MSR_IA32_RTIT_STATUS_MASK (~(RTIT_STATUS_FILTEREN | \
	RTIT_STATUS_CONTEXTEN | RTIT_STATUS_TRIGGEREN | \
	RTIT_STATUS_ERROR | RTIT_STATUS_STOPPED | \
	RTIT_STATUS_BYTECNT))

#define MSR_IA32_RTIT_OUTPUT_BASE_MASK \
	(~((1UL << cpuid_query_maxphyaddr(vcpu)) - 1) | 0x7f)

/*
 * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
 * ple_gap:    upper bound on the amount of time between two successive
 *             executions of PAUSE in a loop. Also indicates whether PLE is
 *             enabled. According to testing, this time is usually smaller
 *             than 128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 *             in a PAUSE loop. Tests indicate that most spinlocks are held for
 *             less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC,
 * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
 */
static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP;
module_param(ple_gap, uint, 0444);

static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, uint, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned int ple_window_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, uint, 0444);

/* Default resets per-vcpu window every exit to ple_window. */
static unsigned int ple_window_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, uint, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned int ple_window_max        = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);

/* Default is SYSTEM mode, 1 for host-guest mode */
int __read_mostly pt_mode = PT_MODE_SYSTEM;
module_param(pt_mode, int, S_IRUGO);

static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
static DEFINE_MUTEX(vmx_l1d_flush_mutex);

/* Storage for pre module init parameter parsing */
static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;

static const struct {
	const char *option;
	bool for_parse;
} vmentry_l1d_param[] = {
	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
};

#define L1D_CACHE_ORDER 4
static void *vmx_l1d_flush_pages;

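/*
 * Resolve the requested L1D flush mode against the global L1TF mitigation
 * policy, allocate the software flush pages when the CPU lacks the
 * IA32_FLUSH_CMD fallback, and flip the static keys that gate the flush
 * on VM entry.
 */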
static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
{
	struct page *page;
	unsigned int i;

	if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
		return 0;
	}

	if (!enable_ept) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
		return 0;
	}

	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES)) {
		u64 msr;

		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, msr);
		if (msr & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH) {
			l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
			return 0;
		}
	}

	/* If set to auto use the default l1tf mitigation method */
	if (l1tf == VMENTER_L1D_FLUSH_AUTO) {
		switch (l1tf_mitigation) {
		case L1TF_MITIGATION_OFF:
			l1tf = VMENTER_L1D_FLUSH_NEVER;
			break;
		case L1TF_MITIGATION_FLUSH_NOWARN:
		case L1TF_MITIGATION_FLUSH:
		case L1TF_MITIGATION_FLUSH_NOSMT:
			l1tf = VMENTER_L1D_FLUSH_COND;
			break;
		case L1TF_MITIGATION_FULL:
		case L1TF_MITIGATION_FULL_FORCE:
			l1tf = VMENTER_L1D_FLUSH_ALWAYS;
			break;
		}
	} else if (l1tf_mitigation == L1TF_MITIGATION_FULL_FORCE) {
		l1tf = VMENTER_L1D_FLUSH_ALWAYS;
	}

	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
		/*
		 * This allocation for vmx_l1d_flush_pages is not tied to a VM
		 * lifetime and so should not be charged to a memcg.
		 */
		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
		if (!page)
			return -ENOMEM;
		vmx_l1d_flush_pages = page_address(page);

		/*
		 * Initialize each page with a different pattern in
		 * order to protect against KSM in the nested
		 * virtualization case.
		 */
		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
			       PAGE_SIZE);
		}
	}

	l1tf_vmx_mitigation = l1tf;

	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
		static_branch_enable(&vmx_l1d_should_flush);
	else
		static_branch_disable(&vmx_l1d_should_flush);

	if (l1tf == VMENTER_L1D_FLUSH_COND)
		static_branch_enable(&vmx_l1d_flush_cond);
	else
		static_branch_disable(&vmx_l1d_flush_cond);
	return 0;
}

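/* Map a "vmentry_l1d_flush" module parameter string onto its enum value. */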
static int vmentry_l1d_flush_parse(const char *s)
{
	unsigned int i;

	if (s) {
		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
			if (vmentry_l1d_param[i].for_parse &&
			    sysfs_streq(s, vmentry_l1d_param[i].option))
				return i;
		}
	}
	return -EINVAL;
}

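/*
 * Set handler for the "vmentry_l1d_flush" module parameter: apply the new
 * mode immediately, or just stash it if vmx_init() has not run yet.
 */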
static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
{
	int l1tf, ret;

	l1tf = vmentry_l1d_flush_parse(s);
	if (l1tf < 0)
		return l1tf;

	if (!boot_cpu_has(X86_BUG_L1TF))
		return 0;

	/*
	 * Has vmx_init() run already? If not then this is the pre init
	 * parameter parsing. In that case just store the value and let
	 * vmx_init() do the proper setup after enable_ept has been
	 * established.
	 */
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
		vmentry_l1d_flush_param = l1tf;
		return 0;
	}

	mutex_lock(&vmx_l1d_flush_mutex);
	ret = vmx_setup_l1d_flush(l1tf);
	mutex_unlock(&vmx_l1d_flush_mutex);
	return ret;
}

static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
{
	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
		return sprintf(s, "???\n");

	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
}

static const struct kernel_param_ops vmentry_l1d_flush_ops = {
	.set = vmentry_l1d_flush_set,
	.get = vmentry_l1d_flush_get,
};
module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, 0644);

static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
							  u32 msr, int type);

void vmx_vmexit(void);

#define vmx_insn_failed(fmt...)		\
do {					\
	WARN_ONCE(1, fmt);		\
	pr_warn_ratelimited(fmt);	\
} while (0)

asmlinkage void vmread_error(unsigned long field, bool fault)
{
	if (fault)
		kvm_spurious_fault();
	else
		vmx_insn_failed("kvm: vmread failed: field=%lx\n", field);
}

noinline void vmwrite_error(unsigned long field, unsigned long value)
{
	vmx_insn_failed("kvm: vmwrite failed: field=%lx val=%lx err=%d\n",
			field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
}

noinline void vmclear_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmclear failed: %p/%llx\n", vmcs, phys_addr);
}

noinline void vmptrld_error(struct vmcs *vmcs, u64 phys_addr)
{
	vmx_insn_failed("kvm: vmptrld failed: %p/%llx\n", vmcs, phys_addr);
}

noinline void invvpid_error(unsigned long ext, u16 vpid, gva_t gva)
{
	vmx_insn_failed("kvm: invvpid failed: ext=0x%lx vpid=%u gva=0x%lx\n",
			ext, vpid, gva);
}

noinline void invept_error(unsigned long ext, u64 eptp, gpa_t gpa)
{
	vmx_insn_failed("kvm: invept failed: ext=0x%lx eptp=%llx gpa=0x%llx\n",
			ext, eptp, gpa);
}

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
 * when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
 */
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);

/*
 * We maintain a per-CPU linked-list of vCPUs, so in wakeup_handler() we
 * can find which vCPU should be woken up.
 */
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);

static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);

struct vmcs_config vmcs_config;
struct vmx_capability vmx_capability;

#define VMX_SEGMENT_FIELD(seg)					\
	[VCPU_SREG_##seg] = {                                   \
		.selector = GUEST_##seg##_SELECTOR,		\
		.base = GUEST_##seg##_BASE,		   	\
		.limit = GUEST_##seg##_LIMIT,		   	\
		.ar_bytes = GUEST_##seg##_AR_BYTES,	   	\
	}

static const struct kvm_vmx_segment_field {
	unsigned selector;
	unsigned base;
	unsigned limit;
	unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
	VMX_SEGMENT_FIELD(CS),
	VMX_SEGMENT_FIELD(DS),
	VMX_SEGMENT_FIELD(ES),
	VMX_SEGMENT_FIELD(FS),
	VMX_SEGMENT_FIELD(GS),
	VMX_SEGMENT_FIELD(SS),
	VMX_SEGMENT_FIELD(TR),
	VMX_SEGMENT_FIELD(LDTR),
};

static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
	vmx->segment_cache.bitmask = 0;
}

static unsigned long host_idt_base;

/*
 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
 * will emulate SYSCALL in legacy mode if the vendor string in guest
 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
 * support this emulation, IA32_STAR must always be included in
 * vmx_msr_index[], even in i386 builds.
 */
const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
	MSR_EFER, MSR_TSC_AUX, MSR_STAR,
	MSR_IA32_TSX_CTRL,
};

#if IS_ENABLED(CONFIG_HYPERV)
static bool __read_mostly enlightened_vmcs = true;
module_param(enlightened_vmcs, bool, 0444);

/* check_ept_pointer() should be under protection of ept_pointer_lock. */
static void check_ept_pointer_match(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	u64 tmp_eptp = INVALID_PAGE;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!VALID_PAGE(tmp_eptp)) {
			tmp_eptp = to_vmx(vcpu)->ept_pointer;
		} else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) {
			to_kvm_vmx(kvm)->ept_pointers_match
				= EPT_POINTERS_MISMATCH;
			return;
		}
	}

	to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH;
}

static int kvm_fill_hv_flush_list_func(struct hv_guest_mapping_flush_list *flush,
		void *data)
{
	struct kvm_tlb_range *range = data;

	return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn,
			range->pages);
}

static inline int __hv_remote_flush_tlb_with_range(struct kvm *kvm,
		struct kvm_vcpu *vcpu, struct kvm_tlb_range *range)
{
	u64 ept_pointer = to_vmx(vcpu)->ept_pointer;

	/*
	 * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs address
	 * of the base of EPT PML4 table, strip off EPT configuration
	 * information.
	 */
	if (range)
		return hyperv_flush_guest_mapping_range(ept_pointer & PAGE_MASK,
				kvm_fill_hv_flush_list_func, (void *)range);
	else
		return hyperv_flush_guest_mapping(ept_pointer & PAGE_MASK);
}

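/*
 * Flush the guest's TLB via Hyper-V hypercalls: one call covers all vCPUs
 * when they share an EPT pointer, otherwise flush each vCPU individually.
 */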
static int hv_remote_flush_tlb_with_range(struct kvm *kvm,
		struct kvm_tlb_range *range)
{
	struct kvm_vcpu *vcpu;
	int ret = 0, i;

	spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock);

	if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK)
		check_ept_pointer_match(kvm);

	if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			/* If ept_pointer is invalid pointer, bypass flush request. */
			if (VALID_PAGE(to_vmx(vcpu)->ept_pointer))
				ret |= __hv_remote_flush_tlb_with_range(
					kvm, vcpu, range);
		}
	} else {
		ret = __hv_remote_flush_tlb_with_range(kvm,
				kvm_get_vcpu(kvm, 0), range);
	}

	spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
	return ret;
}
static int hv_remote_flush_tlb(struct kvm *kvm)
{
	return hv_remote_flush_tlb_with_range(kvm, NULL);
}

static int hv_enable_direct_tlbflush(struct kvm_vcpu *vcpu)
{
	struct hv_enlightened_vmcs *evmcs;
	struct hv_partition_assist_pg **p_hv_pa_pg =
			&vcpu->kvm->arch.hyperv.hv_pa_pg;
	/*
	 * Synthetic VM-Exit is not enabled in current code and so all eVMCSes
	 * in a single VM share the same assist page.
	 */
	if (!*p_hv_pa_pg)
		*p_hv_pa_pg = kzalloc(PAGE_SIZE, GFP_KERNEL);

	if (!*p_hv_pa_pg)
		return -ENOMEM;

	evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs;

	evmcs->partition_assist_page =
		__pa(*p_hv_pa_pg);
	evmcs->hv_vm_id = (unsigned long)vcpu->kvm;
	evmcs->hv_enlightenments_control.nested_flush_hypercall = 1;

	return 0;
}

#endif /* IS_ENABLED(CONFIG_HYPERV) */

/*
 * Comment's format: document - errata name - stepping - processor name.
 * Refer from
 * https://www.virtualbox.org/svn/vbox/trunk/src/VBox/VMM/VMMR0/HMR0.cpp
 */
static u32 vmx_preemption_cpu_tfms[] = {
/* 323344.pdf - BA86   - D0 - Xeon 7500 Series */
0x000206E6,
/* 323056.pdf - AAX65  - C2 - Xeon L3406 */
/* 322814.pdf - AAT59  - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
/* 322911.pdf - AAU65  - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
0x00020652,
/* 322911.pdf - AAU65  - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
0x00020655,
/* 322373.pdf - AAO95  - B1 - Xeon 3400 Series */
/* 322166.pdf - AAN92  - B1 - i7-800 and i5-700 Desktop */
/*
 * 320767.pdf - AAP86  - B1 -
 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
 */
0x000106E5,
/* 321333.pdf - AAM126 - C0 - Xeon 3500 */
0x000106A0,
/* 321333.pdf - AAM126 - C1 - Xeon 3500 */
0x000106A1,
/* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
0x000106A4,
 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
0x000106A5,
 /* Xeon E3-1220 V2 */
0x000306A8,
};

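/* Match this CPU's family/model/stepping against the erratum list above. */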
static inline bool cpu_has_broken_vmx_preemption_timer(void)
{
	u32 eax = cpuid_eax(0x00000001), i;

	/* Clear the reserved bits */
	eax &= ~(0x3U << 14 | 0xfU << 28);
	for (i = 0; i < ARRAY_SIZE(vmx_preemption_cpu_tfms); i++)
		if (eax == vmx_preemption_cpu_tfms[i])
			return true;

	return false;
}

static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
	return flexpriority_enabled && lapic_in_kernel(vcpu);
}

static inline bool report_flexpriority(void)
{
	return flexpriority_enabled;
}

static inline int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	for (i = 0; i < vmx->nmsrs; ++i)
		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
			return i;
	return -1;
}

struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
	int i;

	i = __find_msr_index(vmx, msr);
	if (i >= 0)
		return &vmx->guest_msrs[i];
	return NULL;
}

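/*
 * Update one of the guest's shared MSR slots; if that slot is currently
 * loaded, propagate the new value via kvm_set_shared_msr() and restore
 * the old value on failure.
 */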
static int vmx_set_guest_msr(struct vcpu_vmx *vmx, struct shared_msr_entry *msr, u64 data)
{
	int ret = 0;

	u64 old_msr_data = msr->data;
	msr->data = data;
	if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
		preempt_disable();
		ret = kvm_set_shared_msr(msr->index, msr->data,
					 msr->mask);
		preempt_enable();
		if (ret)
			msr->data = old_msr_data;
	}
	return ret;
}

#ifdef CONFIG_KEXEC_CORE
static void crash_vmclear_local_loaded_vmcss(void)
{
	int cpu = raw_smp_processor_id();
	struct loaded_vmcs *v;

	list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
			    loaded_vmcss_on_cpu_link)
		vmcs_clear(v->vmcs);
}
#endif /* CONFIG_KEXEC_CORE */

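/*
 * VMCLEAR the given loaded_vmcs on the CPU that owns it and remove it from
 * that CPU's list; invoked via smp_call_function_single() from
 * loaded_vmcs_clear().
 */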
static void __loaded_vmcs_clear(void *arg)
{
	struct loaded_vmcs *loaded_vmcs = arg;
	int cpu = raw_smp_processor_id();

	if (loaded_vmcs->cpu != cpu)
		return; /* vcpu migration can race with cpu offline */
	if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
		per_cpu(current_vmcs, cpu) = NULL;

	vmcs_clear(loaded_vmcs->vmcs);
	if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
		vmcs_clear(loaded_vmcs->shadow_vmcs);

	list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);

	/*
	 * Ensure all writes to loaded_vmcs, including deleting it from its
	 * current percpu list, complete before setting loaded_vmcs->cpu to
	 * -1, otherwise a different cpu can see cpu == -1 first and add
	 * loaded_vmcs to its percpu list before it's deleted from this cpu's
	 * list. Pairs with the smp_rmb() in vmx_vcpu_load_vmcs().
	 */
	smp_wmb();

	loaded_vmcs->cpu = -1;
	loaded_vmcs->launched = 0;
}

void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
	int cpu = loaded_vmcs->cpu;

	if (cpu != -1)
		smp_call_function_single(cpu,
			 __loaded_vmcs_clear, loaded_vmcs, 1);
}

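/*
 * Returns true if the given field of the segment is already cached;
 * otherwise marks it as cached so the caller can read it from the VMCS
 * and fill the cache.
 */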
static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
				       unsigned field)
{
	bool ret;
	u32 mask = 1 << (seg * SEG_FIELD_NR + field);

	if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) {
		kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS);
		vmx->segment_cache.bitmask = 0;
	}
	ret = vmx->segment_cache.bitmask & mask;
	vmx->segment_cache.bitmask |= mask;
	return ret;
}

static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
	u16 *p = &vmx->segment_cache.seg[seg].selector;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
		*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
	return *p;
}

static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
	ulong *p = &vmx->segment_cache.seg[seg].base;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
		*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
	return *p;
}

static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].limit;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
	return *p;
}

static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
	u32 *p = &vmx->segment_cache.seg[seg].ar;

	if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
		*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
	return *p;
}

void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
	u32 eb;

	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
	     (1u << DB_VECTOR) | (1u << AC_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does.
	 */
	if (enable_vmware_backdoor)
		eb |= (1u << GP_VECTOR);
	if ((vcpu->guest_debug &
	     (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
	    (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
		eb |= 1u << BP_VECTOR;
	if (to_vmx(vcpu)->rmode.vm86_active)
		eb = ~0;
	if (enable_ept)
		eb &= ~(1u << PF_VECTOR);

	/* When we are running a nested L2 guest and L1 specified for it a
	 * certain exception bitmap, we must trap the same exceptions and pass
	 * them to L1. When running L2, we will only handle the exceptions
	 * specified above if L1 did not want them.
	 */
	if (is_guest_mode(vcpu))
		eb |= get_vmcs12(vcpu)->exception_bitmap;

	vmcs_write32(EXCEPTION_BITMAP, eb);
}

/*
 * Check if MSR is intercepted for currently loaded MSR bitmap.
 */
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr)
{
	unsigned long *msr_bitmap;
	int f = sizeof(unsigned long);

	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}

static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit)
{
	vm_entry_controls_clearbit(vmx, entry);
	vm_exit_controls_clearbit(vmx, exit);
}

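/* Find the slot of the given MSR in an autoload/autostore list, or -ENOENT. */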
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr)
{
	unsigned int i;

	for (i = 0; i < m->nr; ++i) {
		if (m->val[i].index == msr)
			return i;
	}
	return -ENOENT;
}

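/*
 * Drop the given MSR from the VM-entry/VM-exit autoload lists, or clear the
 * dedicated VMCS controls for EFER and PERF_GLOBAL_CTRL when those are used
 * instead.
 */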
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
	int i;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer()) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl()) {
			clear_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
			return;
		}
		break;
	}
	i = vmx_find_msr_index(&m->guest, msr);
	if (i < 0)
		goto skip_guest;
	--m->guest.nr;
	m->guest.val[i] = m->guest.val[m->guest.nr];
	vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);

skip_guest:
	i = vmx_find_msr_index(&m->host, msr);
	if (i < 0)
		return;

	--m->host.nr;
	m->host.val[i] = m->host.val[m->host.nr];
	vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
}

static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
		unsigned long entry, unsigned long exit,
		unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
		u64 guest_val, u64 host_val)
{
	vmcs_write64(guest_val_vmcs, guest_val);
	if (host_val_vmcs != HOST_IA32_EFER)
		vmcs_write64(host_val_vmcs, host_val);
	vm_entry_controls_setbit(vmx, entry);
	vm_exit_controls_setbit(vmx, exit);
}

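/*
 * Arrange for the given MSR to be loaded with guest_val on VM entry and
 * host_val on VM exit, preferring the dedicated VMCS fields over the
 * autoload lists where the hardware supports them.
 */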
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val, bool entry_only)
{
	int i, j = 0;
	struct msr_autoload *m = &vmx->msr_autoload;

	switch (msr) {
	case MSR_EFER:
		if (cpu_has_load_ia32_efer()) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_EFER,
					VM_EXIT_LOAD_IA32_EFER,
					GUEST_IA32_EFER,
					HOST_IA32_EFER,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (cpu_has_load_perf_global_ctrl()) {
			add_atomic_switch_msr_special(vmx,
					VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
					VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
					GUEST_IA32_PERF_GLOBAL_CTRL,
					HOST_IA32_PERF_GLOBAL_CTRL,
					guest_val, host_val);
			return;
		}
		break;
	case MSR_IA32_PEBS_ENABLE:
		/* PEBS needs a quiescent period after being disabled (to write
		 * a record).  Disabling PEBS through VMX MSR swapping doesn't
		 * provide that period, so a CPU could write host's record into
		 * guest's memory.
		 */
		wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
	}

	i = vmx_find_msr_index(&m->guest, msr);
	if (!entry_only)
		j = vmx_find_msr_index(&m->host, msr);

	if ((i < 0 && m->guest.nr == NR_LOADSTORE_MSRS) ||
		(j < 0 &&  m->host.nr == NR_LOADSTORE_MSRS)) {
		printk_once(KERN_WARNING "Not enough msr switch entries. "
				"Can't add msr %x\n", msr);
		return;
	}
	if (i < 0) {
		i = m->guest.nr++;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
	}
	m->guest.val[i].index = msr;
	m->guest.val[i].value = guest_val;

	if (entry_only)
		return;

	if (j < 0) {
		j = m->host.nr++;
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
	}
	m->host.val[j].index = msr;
	m->host.val[j].value = host_val;
}

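/*
 * Pick how EFER is switched across VM entry/exit: use the atomic switch
 * path when NX differs under EPT or the CPU can load EFER directly, and
 * return true when the shared-MSR machinery should handle it instead.
 */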
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
	u64 guest_efer = vmx->vcpu.arch.efer;
	u64 ignore_bits = 0;

	/* Shadow paging assumes NX to be available.  */
	if (!enable_ept)
		guest_efer |= EFER_NX;

	/*
	 * LMA and LME handled by hardware; SCE meaningless outside long mode.
	 */
	ignore_bits |= EFER_SCE;
#ifdef CONFIG_X86_64
	ignore_bits |= EFER_LMA | EFER_LME;
	/* SCE is meaningful only in long mode on Intel */
	if (guest_efer & EFER_LMA)
		ignore_bits &= ~(u64)EFER_SCE;
#endif

	/*
	 * On EPT, we can't emulate NX, so we must switch EFER atomically.
	 * On CPUs that support "load IA32_EFER", always switch EFER
	 * atomically, since it's faster than switching it manually.
	 */
	if (cpu_has_load_ia32_efer() ||
	    (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
		if (!(guest_efer & EFER_LMA))
			guest_efer &= ~EFER_LME;
		if (guest_efer != host_efer)
			add_atomic_switch_msr(vmx, MSR_EFER,
					      guest_efer, host_efer, false);
		else
			clear_atomic_switch_msr(vmx, MSR_EFER);
		return false;
	} else {
		clear_atomic_switch_msr(vmx, MSR_EFER);

		guest_efer &= ~ignore_bits;
		guest_efer |= host_efer & ignore_bits;

		vmx->guest_msrs[efer_offset].data = guest_efer;
		vmx->guest_msrs[efer_offset].mask = ~ignore_bits;

		return true;
	}
}

#ifdef CONFIG_X86_32
/*
 * On 32-bit kernels, VM exits still load the FS and GS bases from the
 * VMCS rather than the segment table.  KVM uses this helper to figure
 * out the current bases to poke them into the VMCS before entry.
 */
static unsigned long segment_base(u16 selector)
{
	struct desc_struct *table;
	unsigned long v;

	if (!(selector & ~SEGMENT_RPL_MASK))
		return 0;

	table = get_current_gdt_ro();

	if ((selector & SEGMENT_TI_MASK) == SEGMENT_LDT) {
		u16 ldt_selector = kvm_read_ldt();

		if (!(ldt_selector & ~SEGMENT_RPL_MASK))
			return 0;

		table = (struct desc_struct *)segment_base(ldt_selector);
	}
	v = get_desc_base(&table[selector >> 3]);
	return v;
}
#endif

static inline bool pt_can_write_msr(struct vcpu_vmx *vmx)
{
	return vmx_pt_mode_is_host_guest() &&
	       !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN);
}

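/*
 * Load a saved Processor Trace context into the RTIT MSRs (pt_load_msr),
 * or capture the live RTIT MSRs into a context (pt_save_msr).
 */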
static inline void pt_load_msr(struct pt_ctx *ctx, u32 addr_range)
{
	u32 i;

	wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
	wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
	wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
	wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
	for (i = 0; i < addr_range; i++) {
		wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
		wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
	}
}

static inline void pt_save_msr(struct pt_ctx *ctx, u32 addr_range)
{
	u32 i;

	rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status);
	rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base);
	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask);
	rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match);
	for (i = 0; i < addr_range; i++) {
		rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]);
		rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]);
	}
}

static void pt_guest_enter(struct vcpu_vmx *vmx)
{
	if (vmx_pt_mode_is_system())
		return;

	/*
	 * GUEST_IA32_RTIT_CTL is already set in the VMCS.
	 * Save host state before VM entry.
	 */
	rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
		wrmsrl(MSR_IA32_RTIT_CTL, 0);
		pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
		pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
	}
}

static void pt_guest_exit(struct vcpu_vmx *vmx)
{
	if (vmx_pt_mode_is_system())
		return;

	if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) {
		pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range);
		pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range);
	}

	/* Reload host state (IA32_RTIT_CTL will be cleared on VM exit). */
	wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
}

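/*
 * Update the host FS/GS selectors and bases in the VMCS, writing only the
 * fields that changed since they were last cached.
 */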
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base)
{
	if (unlikely(fs_sel != host->fs_sel)) {
		if (!(fs_sel & 7))
			vmcs_write16(HOST_FS_SELECTOR, fs_sel);
		else
			vmcs_write16(HOST_FS_SELECTOR, 0);
		host->fs_sel = fs_sel;
	}
	if (unlikely(gs_sel != host->gs_sel)) {
		if (!(gs_sel & 7))
			vmcs_write16(HOST_GS_SELECTOR, gs_sel);
		else
			vmcs_write16(HOST_GS_SELECTOR, 0);
		host->gs_sel = gs_sel;
	}
	if (unlikely(fs_base != host->fs_base)) {
		vmcs_writel(HOST_FS_BASE, fs_base);
		host->fs_base = fs_base;
	}
	if (unlikely(gs_base != host->gs_base)) {
		vmcs_writel(HOST_GS_BASE, gs_base);
		host->gs_base = gs_base;
	}
}

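/*
 * Called on every VM entry: reload the guest's shared MSRs if they went
 * stale, sync a dirty shadow VMCS12, and refresh the cached host segment
 * state below when guest state is not already loaded.
 */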
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);
	struct vmcs_host_state *host_state;
#ifdef CONFIG_X86_64
	int cpu = raw_smp_processor_id();
#endif
	unsigned long fs_base, gs_base;
	u16 fs_sel, gs_sel;
	int i;

	vmx->req_immediate_exit = false;

	/*
	 * Note that guest MSRs to be saved/restored can also be changed
	 * when guest state is loaded. This happens when guest transitions
	 * to/from long-mode by setting MSR_EFER.LMA.
	 */
	if (!vmx->guest_msrs_ready) {
		vmx->guest_msrs_ready = true;
		for (i = 0; i < vmx->save_nmsrs; ++i)
			kvm_set_shared_msr(vmx->guest_msrs[i].index,
					   vmx->guest_msrs[i].data,
					   vmx->guest_msrs[i].mask);
	}

	if (vmx->nested.need_vmcs12_to_shadow_sync)
		nested_sync_vmcs12_to_shadow(vcpu);

	if (vmx->guest_state_loaded)
		return;

	host_state = &vmx->loaded_vmcs->host_state;