#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/amd-iommu.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/rwsem.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/mce.h>
#include <asm/spec-ctrl.h>
#include <asm/cpu_device_id.h>

#include <asm/virtext.h>
#include "trace.h"

#include "svm.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_SVM, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

static bool erratum_383_found __read_mostly;

u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL
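
/*
 * Illustrative note (not part of the original source): MSR_AMD64_TSC_RATIO
 * holds an 8.32 fixed-point multiplier, so TSC_RATIO_DEFAULT (0x0100000000)
 * encodes a ratio of 1.0 and the guest TSC advances at roughly
 * (host_tsc * ratio) >> 32; a hypothetical ratio of 0x0180000000 would run
 * the guest TSC at 1.5x the host rate.  TSC_RATIO_MAX above is simply the
 * largest encodable 8.32 value, and svm_hardware_setup() sets
 * kvm_tsc_scaling_ratio_frac_bits to 32 to match this layout.
 */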

static const struct svm_direct_access_msrs {
	u32 index;   /* Index of the MSR */
	bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
	{ .index = MSR_IA32_PRED_CMD,			.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
bool npt_enabled = true;
#else
bool npt_enabled;
#endif

/*
 * These 2 parameters are used to config the controls for Pause-Loop Exiting:
 * pause_filter_count: On processors that support Pause filtering(indicated
 *	by CPUID Fn8000_000A_EDX), the VMCB provides a 16 bit pause filter
 *	count value. On VMRUN this value is loaded into an internal counter.
 *	Each time a pause instruction is executed, this counter is decremented
 *	until it reaches zero at which time a #VMEXIT is generated if pause
 *	intercept is enabled. Refer to  AMD APM Vol 2 Section 15.14.4 Pause
 *	Intercept Filtering for more details.
 *	This also indicates whether ple logic is enabled.
 *
 * pause_filter_thresh: In addition, some processor families support advanced
 *	pause filtering (indicated by CPUID Fn8000_000A_EDX), which puts an
 *	upper bound on the amount of time a guest is allowed to execute in a
 *	pause loop.
 *	In this mode, a 16-bit pause filter threshold field is added in the
 *	VMCB. The threshold value is a cycle count that is used to reset the
 *	pause counter. As with simple pause filtering, VMRUN loads the pause
 *	count value from VMCB into an internal counter. Then, on each pause
 *	instruction the hardware checks the elapsed number of cycles since
 *	the most recent pause instruction against the pause filter threshold.
 *	If the elapsed cycle count is greater than the pause filter threshold,
 *	then the internal pause count is reloaded from the VMCB and execution
 *	continues. If the elapsed cycle count is less than the pause filter
 *	threshold, then the internal pause count is decremented. If the count
 *	value is less than zero and PAUSE intercept is enabled, a #VMEXIT is
 *	triggered. If advanced pause filtering is supported and pause filter
 *	threshold field is set to zero, the filter will operate in the simpler,
 *	count only mode.
 */
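
/*
 * Worked example (added for illustration; the numbers are hypothetical): with
 * pause_filter_count = 3000 and pause_filter_thresh = 128, a PAUSE executed
 * more than 128 cycles after the previous PAUSE reloads the internal counter
 * from the VMCB, while a PAUSE within 128 cycles only decrements it; only
 * after ~3000 such closely spaced PAUSEs (a genuine spin loop) does the PAUSE
 * intercept fire and cause a #VMEXIT.
 */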

static unsigned short pause_filter_thresh = KVM_DEFAULT_PLE_GAP;
module_param(pause_filter_thresh, ushort, 0444);

static unsigned short pause_filter_count = KVM_SVM_DEFAULT_PLE_WINDOW;
module_param(pause_filter_count, ushort, 0444);

/* Default doubles per-vcpu window every exit. */
static unsigned short pause_filter_count_grow = KVM_DEFAULT_PLE_WINDOW_GROW;
module_param(pause_filter_count_grow, ushort, 0444);

/* Default resets per-vcpu window every exit to pause_filter_count. */
static unsigned short pause_filter_count_shrink = KVM_DEFAULT_PLE_WINDOW_SHRINK;
module_param(pause_filter_count_shrink, ushort, 0444);

/* Default is to compute the maximum so we can never overflow. */
static unsigned short pause_filter_count_max = KVM_SVM_DEFAULT_PLE_WINDOW_MAX;
module_param(pause_filter_count_max, ushort, 0444);

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable Next RIP Save */
static int nrips = true;
module_param(nrips, int, 0444);

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static bool __read_mostly dump_invalid_vmcb = 0;
module_param(dump_invalid_vmcb, bool, 0644);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_complete_interrupts(struct vcpu_svm *svm);

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}
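
/*
 * Worked example (added for illustration, not in the original source): for
 * MSR_IA32_SYSENTER_CS (0x174) the loop matches range 0, the byte offset is
 * 0x174 / 4 = 0x5d, and the returned u32 offset is 0x5d / 4 = 23; for
 * MSR_STAR (0xc0000081) range 1 matches, giving (0x81 / 4) + 2048 = 2080
 * bytes, i.e. a u32 offset of 520.
 */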

#define MAX_INST_SIZE 15

static inline void clgi(void)
{
	asm volatile (__ex("clgi"));
}

static inline void stgi(void)
{
	asm volatile (__ex("stgi"));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	vcpu->arch.efer = efer;

	if (!npt_enabled) {
		/* Shadow paging assumes NX to be available.  */
		efer |= EFER_NX;

		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}

	if (!(efer & EFER_SVME)) {
		svm_leave_nested(svm);
		svm_set_gif(svm, true);
	}

	svm->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(svm->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (nrips && svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
			return 0;
	} else {
		kvm_rip_write(vcpu, svm->next_rip);
	}
	svm_set_interrupt_shadow(vcpu, 0);

	return 1;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	u32 error_code = vcpu->arch.exception.error_code;

	kvm_deliver_exception_payload(&svm->vcpu);

	if (nr == BP_VECTOR && !nrips) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		(void)skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}

	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto free_cpu_data;

	if (svm_sev_enabled()) {
		sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
					      sizeof(void *),
					      GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto free_save_area;
	}

	per_cpu(svm_data, cpu) = sd;

	return 0;

free_save_area:
	__free_page(sd->save_area);
free_cpu_data:
	kfree(sd);
	return -ENOMEM;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
	u8 bit_write;
	unsigned long tmp;
	u32 offset;
	u32 *msrpm;

	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
				      to_svm(vcpu)->msrpm;

	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	return !!test_bit(bit_write,  &tmp);
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}
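
/*
 * Layout note (added for illustration): every MSR owns two adjacent bits in
 * the permission map, bit 2 * (msr & 0xf) for reads and the following bit for
 * writes, so each u32 of the bitmap covers 16 consecutive MSRs.  For
 * MSR_IA32_SYSENTER_CS (0x174) that is u32 offset 23 with read bit 8 and
 * write bit 9.  A set bit means "intercept", which is why pass-through clears
 * the bits and interception sets them.
 */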

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

static void grow_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	control->pause_filter_count = __grow_ple_window(old,
							pause_filter_count,
							pause_filter_count_grow,
							pause_filter_count_max);

	if (control->pause_filter_count != old) {
		mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}

static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	int old = control->pause_filter_count;

	control->pause_filter_count =
				__shrink_ple_window(old,
						    pause_filter_count,
						    pause_filter_count_shrink,
						    pause_filter_count);
	if (control->pause_filter_count != old) {
		mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
		trace_kvm_ple_window_update(vcpu->vcpu_id,
					    control->pause_filter_count, old);
	}
}
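
/*
 * Worked example (added for illustration, assuming the generic
 * __grow_ple_window()/__shrink_ple_window() helpers scale the window by the
 * grow/shrink modifier and clamp the result): with the default settings
 * documented at the module parameters above, grow_ple_window() doubles a
 * window of 3000 to 6000, capped at pause_filter_count_max, while
 * shrink_ple_window() resets it straight back to pause_filter_count.
 */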

/*
 * The default MMIO mask is a single bit (excluding the present bit),
 * which could conflict with the memory encryption bit. Check for
 * memory encryption support and override the default MMIO mask if
 * memory encryption is enabled.
 */
static __init void svm_adjust_mmio_mask(void)
{
	unsigned int enc_bit, mask_bit;
	u64 msr, mask;

	/* If there is no memory encryption support, use existing mask */
	if (cpuid_eax(0x80000000) < 0x8000001f)
		return;

	/* If memory encryption is not enabled, use existing mask */
	rdmsrl(MSR_K8_SYSCFG, msr);
	if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
		return;

	enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
	mask_bit = boot_cpu_data.x86_phys_bits;

	/* Increment the mask bit if it is the same as the encryption bit */
	if (enc_bit == mask_bit)
		mask_bit++;

	/*
	 * If the mask bit location is below 52, then some bits above the
	 * physical addressing limit will always be reserved, so use the
	 * rsvd_bits() function to generate the mask. This mask, along with
	 * the present bit, will be used to generate a page fault with
	 * PFER.RSV = 1.
	 *
	 * If the mask bit location is 52 (or above), then clear the mask.
	 */
	mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;

	kvm_mmu_set_mmio_spte_mask(mask, PT_WRITABLE_MASK | PT_USER_MASK);
}
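
/*
 * Worked example (added for illustration; the values are hypothetical): with
 * enc_bit = 47 and boot_cpu_data.x86_phys_bits = 43, mask_bit stays 43 and
 * the MMIO SPTE mask becomes rsvd_bits(43, 51) | PT_PRESENT_MASK, i.e. a
 * present SPTE with bits above the physical addressing limit set, which
 * faults with PFER.RSV = 1 as described above.  If enc_bit happened to equal
 * mask_bit, mask_bit would be incremented so the mask is not built starting
 * at the encryption bit, and for mask_bit >= 52 the mask is cleared entirely.
 */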

static void svm_hardware_teardown(void)
{
	int cpu;

	if (svm_sev_enabled())
		sev_hardware_teardown();

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static __init void svm_set_cpu_caps(void)
{
	kvm_set_cpu_caps();

	supported_xss = 0;

	/* CPUID 0x80000001 and 0x8000000A (SVM features) */
	if (nested) {
		kvm_cpu_cap_set(X86_FEATURE_SVM);

		if (nrips)
			kvm_cpu_cap_set(X86_FEATURE_NRIPS);

		if (npt_enabled)
			kvm_cpu_cap_set(X86_FEATURE_NPT);
	}

	/* CPUID 0x80000008 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
	    boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	/* Check for pause filtering support */
	if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
		pause_filter_count = 0;
		pause_filter_thresh = 0;
	} else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
		pause_filter_thresh = 0;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	if (sev) {
		if (boot_cpu_has(X86_FEATURE_SEV) &&
		    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
			r = sev_hardware_setup();
			if (r)
				sev = false;
		} else {
			sev = false;
		}
	}

	svm_adjust_mmio_mask();

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt)
		npt_enabled = false;

	kvm_configure_mmu(npt_enabled, PG_LEVEL_1G);
	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");

	if (nrips) {
		if (!boot_cpu_has(X86_FEATURE_NRIPS))
			nrips = false;
	}

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	svm_set_cpu_caps();

	return 0;

err:
	svm_hardware_teardown();
	return r;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		/* Write L1's TSC offset.  */
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	}

	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
				   svm->vmcb->control.tsc_offset - g_tsc_offset,
				   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
	return svm->vmcb->control.tsc_offset;
}
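
/*
 * Worked example (added for illustration; the numbers are made up): if L1 had
 * programmed a TSC offset of 100 (saved in hsave) and the currently active
 * nested VMCB holds a combined offset of 300, then g_tsc_offset = 300 - 100 =
 * 200; writing a new L1 offset of 150 stores 150 into hsave and programs
 * 150 + 200 = 350 into the active VMCB, so L2 keeps its own delta of 200 on
 * top of whatever offset L1 now uses.
 */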

static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	svm->vcpu.arch.hflags = 0;

	set_cr_intercept(svm, INTERCEPT_CR0_READ);
	set_cr_intercept(svm, INTERCEPT_CR3_READ);
	set_cr_intercept(svm, INTERCEPT_CR4_READ);
	set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
	set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
	if (!kvm_vcpu_apicv_active(&svm->vcpu))
		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);

	set_dr_intercepts(svm);

	set_exception_intercept(svm, PF_VECTOR);
	set_exception_intercept(svm, UD_VECTOR);
	set_exception_intercept(svm, MC_VECTOR);
	set_exception_intercept(svm, AC_VECTOR);
	set_exception_intercept(svm, DB_VECTOR);
	/*
	 * Guest access to VMware backdoor ports could legitimately
	 * trigger #GP because of TSS I/O permission bitmap.
	 * We intercept those #GP and allow access to them anyway
	 * as VMware does.
	 */
	if (enable_vmware_backdoor)
		set_exception_intercept(svm, GP_VECTOR);

	set_intercept(svm, INTERCEPT_INTR);
	set_intercept(svm, INTERCEPT_NMI);
	set_intercept(svm, INTERCEPT_SMI);
	set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
	set_intercept(svm, INTERCEPT_RDPMC);
	set_intercept(svm, INTERCEPT_CPUID);
	set_intercept(svm, INTERCEPT_INVD);
	set_intercept(svm, INTERCEPT_INVLPG);
	set_intercept(svm, INTERCEPT_INVLPGA);
	set_intercept(svm, INTERCEPT_IOIO_PROT);
	set_intercept(svm, INTERCEPT_MSR_PROT);
	set_intercept(svm, INTERCEPT_TASK_SWITCH);
	set_intercept(svm, INTERCEPT_SHUTDOWN);
	set_intercept(svm, INTERCEPT_VMRUN);
	set_intercept(svm, INTERCEPT_VMMCALL);
	set_intercept(svm, INTERCEPT_VMLOAD);
	set_intercept(svm, INTERCEPT_VMSAVE);
	set_intercept(svm, INTERCEPT_STGI);
	set_intercept(svm, INTERCEPT_CLGI);
	set_intercept(svm, INTERCEPT_SKINIT);
	set_intercept(svm, INTERCEPT_WBINVD);
	set_intercept(svm, INTERCEPT_XSETBV);
	set_intercept(svm, INTERCEPT_RDPRU);
	set_intercept(svm, INTERCEPT_RSM);

	if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
		set_intercept(svm, INTERCEPT_MONITOR);
		set_intercept(svm, INTERCEPT_MWAIT);
	}

	if (!kvm_hlt_in_guest(svm->vcpu.kvm))
		set_intercept(svm, INTERCEPT_HLT);

	control->iopm_base_pa = __sme_set(iopm_base);
	control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	save->cs.base = 0xffff0000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	svm_set_efer(&svm->vcpu, 0);
	save->dr6 = 0xffff0ff0;
	kvm_set_rflags(&svm->vcpu, 2);
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
	 * It also updates the guest-visible cr0 value.
	 */
	svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
	kvm_mmu_reset_context(&svm->vcpu);

	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
		clr_intercept(svm, INTERCEPT_INVLPG);
		clr_exception_intercept(svm, PF_VECTOR);
		clr_cr_intercept(svm, INTERCEPT_CR3_READ);
		clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
		save->g_pat = svm->vcpu.arch.pat;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	svm->asid_generation = 0;

	svm->nested.vmcb = 0;
	svm->vcpu.arch.hflags = 0;

	if (pause_filter_count) {
		control->pause_filter_count = pause_filter_count;
		if (pause_filter_thresh)
			control->pause_filter_thresh = pause_filter_thresh;
		set_intercept(svm, INTERCEPT_PAUSE);
	} else {
		clr_intercept(svm, INTERCEPT_PAUSE);
	}

	if (kvm_vcpu_apicv_active(&svm->vcpu))
		avic_init_vmcb(svm);

	/*
	 * If hardware supports Virtual VMLOAD VMSAVE then enable it
	 * in VMCB and clear intercepts to avoid #VMEXIT.
	 */
	if (vls) {
		clr_intercept(svm, INTERCEPT_VMLOAD);
		clr_intercept(svm, INTERCEPT_VMSAVE);
		svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
	}

	if (vgif) {
		clr_intercept(svm, INTERCEPT_STGI);
		clr_intercept(svm, INTERCEPT_CLGI);
		svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
	}

	if (sev_guest(svm->vcpu.kvm)) {
		svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
		clr_exception_intercept(svm, UD_VECTOR);
	}

	mark_all_dirty(svm->vmcb);

	enable_gif(svm);

}

static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 dummy;
	u32 eax = 1;

	svm->spec_ctrl = 0;
	svm->virt_spec_ctrl = 0;