Commit aa2eaa8c authored by David S. Miller

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Minor overlapping changes in the btusb and ixgbe drivers.
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a3d3c74d 1609d760
......@@ -107,10 +107,13 @@ ForEachMacros:
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
- 'dma_fence_chain_for_each'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
......@@ -126,6 +129,7 @@ ForEachMacros:
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_available_child_of_node'
......@@ -153,6 +157,8 @@ ForEachMacros:
- 'for_each_cpu_not'
- 'for_each_cpu_wrap'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- 'for_each_displayid_db'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
......@@ -169,6 +175,8 @@ ForEachMacros:
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- 'for_each_free_mem_pfn_range_in_zone'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
......@@ -178,6 +186,7 @@ ForEachMacros:
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_link_codecs'
- 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
......@@ -302,7 +311,10 @@ ForEachMacros:
- 'ide_port_for_each_present_dev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
- 'inet_lhash2_for_each_icsk_rcu'
- 'key_for_each'
......@@ -343,8 +355,6 @@ ForEachMacros:
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
-  - 'mp_bvec_for_each_page'
-  - 'mp_bvec_for_each_segment'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
......@@ -381,18 +391,19 @@ ForEachMacros:
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
-  - 'rht_for_each_from'
- 'rht_for_each_entry'
- 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
+  - 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- '__rq_for_each_bio'
......
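The ForEachMacros list updated above tells clang-format which function-like macros should be formatted as loop statements rather than as ordinary calls. A minimal illustration with a made-up iterator macro (nothing below is from the kernel tree):

/* Hypothetical iterator macro, shown only to illustrate the effect. */
#define for_each_demo_item(pos, head) \
	for ((pos) = (head); (pos); (pos) = (pos)->next)

struct demo_item {
	struct demo_item *next;
	int val;
};

static int sum_items(struct demo_item *head)
{
	struct demo_item *it;
	int sum = 0;

	/*
	 * With 'for_each_demo_item' listed under ForEachMacros, clang-format
	 * formats this construct like a for statement (loop indentation,
	 * no break after the macro name) instead of like a function call.
	 */
	for_each_demo_item(it, head)
		sum += it->val;

	return sum;
}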
......@@ -5,7 +5,7 @@ Scope
-----
Hardware issues which result in security problems are a different category
of security bugs than pure software bugs which only affect the Linux
kernel.
Hardware issues like Meltdown, Spectre, L1TF etc. must be treated
......@@ -159,7 +159,7 @@ Mitigation development
The initial response team sets up an encrypted mailing-list or repurposes
an existing one if appropriate. The disclosing party should provide a list
-of contacts for all other parties who have already been, or should be
+of contacts for all other parties who have already been, or should be,
informed about the issue. The response team contacts these parties so they
can name experts who should be subscribed to the mailing-list.
......@@ -217,11 +217,11 @@ an involved disclosed party. The current ambassadors list:
AMD
IBM
Intel
-Qualcomm
+Qualcomm	Trilok Soni <tsoni@codeaurora.org>
-Microsoft
+Microsoft	Sasha Levin <sashal@kernel.org>
VMware
-XEN
+Xen		Andrew Cooper <andrew.cooper3@citrix.com>
Canonical Tyler Hicks <tyhicks@canonical.com>
Debian Ben Hutchings <ben@decadent.org.uk>
......@@ -230,8 +230,8 @@ an involved disclosed party. The current ambassadors list:
SUSE Jiri Kosina <jkosina@suse.cz>
Amazon
-Google
-============== ========================================================
+Google		Kees Cook <keescook@chromium.org>
+============= ========================================================
If you want your organization to be added to the ambassadors list, please
contact the hardware security team. The nominated ambassador has to
......
......@@ -18,7 +18,7 @@ The following 64-byte header is present in decompressed Linux kernel image.
u32 res1 = 0; /* Reserved */
u64 res2 = 0; /* Reserved */
u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */
-u32 res3; /* Reserved for additional RISC-V specific header */
+u32 magic2 = 0x05435352; /* Magic number 2, little endian, "RSC\x05" */
u32 res4; /* Reserved for PE COFF offset */
This header format is compliant with PE/COFF header and largely inspired from
......@@ -37,13 +37,14 @@ Notes:
Bits 16:31 - Major version
This preserves compatibility across newer and older version of the header.
-The current version is defined as 0.1.
+The current version is defined as 0.2.
-- res3 is reserved for offset to any other additional fields. This makes the
-header extendible in future. One example would be to accommodate ISA
-extension for RISC-V in future. For current version, it is set to be zero.
+- The "magic" field is deprecated as of version 0.2. In a future
+release, it may be removed. This originally should have matched up
+with the ARM64 header "magic" field, but unfortunately does not.
+The "magic2" field replaces it, matching up with the ARM64 header.
-- In current header, the flag field has only one field.
+- In current header, the flags field has only one field.
Bit 0: Kernel endianness. 1 if BE, 0 if LE.
- Image size is mandatory for boot loader to load kernel image. Booting will
......
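The documentation hunk above describes the version 0.2 header layout: the old 64-bit "magic" field is kept but deprecated, and a new 32-bit "magic2" field sits at the offset used by the ARM64 header's magic. A rough C sketch of a loader-side check under those assumptions (fields before res1 follow the part of the 64-byte header description not shown in this hunk; the helper is hypothetical):

#include <stdint.h>
#include <string.h>

struct riscv_image_header {
	uint32_t code0;		/* Executable code */
	uint32_t code1;		/* Executable code */
	uint64_t text_offset;	/* Image load offset, little endian */
	uint64_t image_size;	/* Effective image size, little endian */
	uint64_t flags;		/* Kernel flags, little endian */
	uint32_t version;	/* Bits 16:31 major, bits 0:15 minor */
	uint32_t res1;
	uint64_t res2;
	uint64_t magic;		/* "RISCV", deprecated as of 0.2 */
	uint32_t magic2;	/* "RSC\x05", same offset as the ARM64 magic */
	uint32_t res4;
};

/* Hypothetical loader-side check; returns 0 when the image looks valid. */
static int check_riscv_image(const struct riscv_image_header *h)
{
	const char magic2[4] = { 'R', 'S', 'C', 0x05 };

	if (memcmp(&h->magic2, magic2, sizeof(magic2)) != 0)
		return -1;

	/* Major version lives in the upper 16 bits, minor in the lower 16. */
	if ((h->version >> 16) != 0 || (h->version & 0xffff) < 2)
		return -1;

	return 0;
}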
......@@ -17732,8 +17732,7 @@ F: include/uapi/linux/dqblk_xfs.h
F: include/uapi/linux/fsmap.h
XILINX AXI ETHERNET DRIVER
-M: Anirudha Sarangi <anirudh@xilinx.com>
-M: John Linn <John.Linn@xilinx.com>
+M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
......
......@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 3
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
NAME = Bobtail Squid
# *DOCUMENTATION*
......
......@@ -279,6 +279,7 @@
mmc-hs200-1_8v;
non-removable;
fixed-emmc-driver-type = <1>;
status = "okay";
};
&usb_extal_clk {
......
......@@ -97,7 +97,7 @@
reg = <0x0 0x48000000 0x0 0x18000000>;
};
-reg_1p8v: regulator0 {
+reg_1p8v: regulator-1p8v {
compatible = "regulator-fixed";
regulator-name = "fixed-1.8V";
regulator-min-microvolt = <1800000>;
......@@ -106,7 +106,7 @@
regulator-always-on;
};
-reg_3p3v: regulator1 {
+reg_3p3v: regulator-3p3v {
compatible = "regulator-fixed";
regulator-name = "fixed-3.3V";
regulator-min-microvolt = <3300000>;
......@@ -115,7 +115,7 @@
regulator-always-on;
};
-reg_12p0v: regulator1 {
+reg_12p0v: regulator-12p0v {
compatible = "regulator-fixed";
regulator-name = "D12.0V";
regulator-min-microvolt = <12000000>;
......
......@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
}
}
-static bool tm_active_with_fp(struct task_struct *tsk)
-{
-return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-(tsk->thread.ckpt_regs.msr & MSR_FP);
-}
-static bool tm_active_with_altivec(struct task_struct *tsk)
-{
-return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-(tsk->thread.ckpt_regs.msr & MSR_VEC);
-}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
bool strict_msr_control;
......@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
static int restore_fp(struct task_struct *tsk)
{
-if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
+if (tsk->thread.load_fp) {
load_fp_state(&current->thread.fp_state);
current->thread.load_fp++;
return 1;
......@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
static int restore_altivec(struct task_struct *tsk)
{
-if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
-(tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
+if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
load_vr_state(&tsk->thread.vr_state);
tsk->thread.used_vr = 1;
tsk->thread.load_vec++;
......@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
if (!tsk->thread.regs)
return;
-check_if_tm_restore_required(tsk);
usermsr = tsk->thread.regs->msr;
if ((usermsr & msr_all_available) == 0)
return;
msr_check_and_set(msr_all_available);
+check_if_tm_restore_required(tsk);
WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
......
......@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
#ifdef CONFIG_PPC_FSL_BOOK3E
if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
unsigned int num_cams;
-int __maybe_unused cpu = smp_processor_id();
bool map = true;
/* use a quarter of the TLBCAM for bolted linear map */
......
......@@ -3,7 +3,8 @@
#ifndef __ASM_IMAGE_H
#define __ASM_IMAGE_H
-#define RISCV_IMAGE_MAGIC "RISCV"
+#define RISCV_IMAGE_MAGIC "RISCV\0\0\0"
+#define RISCV_IMAGE_MAGIC2 "RSC\x05"
#define RISCV_IMAGE_FLAG_BE_SHIFT 0
#define RISCV_IMAGE_FLAG_BE_MASK 0x1
......@@ -23,7 +24,7 @@
#define __HEAD_FLAGS (__HEAD_FLAG(BE))
#define RISCV_HEADER_VERSION_MAJOR 0
-#define RISCV_HEADER_VERSION_MINOR 1
+#define RISCV_HEADER_VERSION_MINOR 2
#define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \
RISCV_HEADER_VERSION_MINOR)
......@@ -39,9 +40,8 @@
* @version: version
* @res1: reserved
* @res2: reserved
- * @magic: Magic number
- * @res3: reserved (will be used for additional RISC-V specific
- * header)
+ * @magic: Magic number (RISC-V specific; deprecated)
+ * @magic2: Magic number 2 (to match the ARM64 'magic' field pos)
* @res4: reserved (will be used for PE COFF offset)
*
* The intention is for this header format to be shared between multiple
......@@ -58,7 +58,7 @@ struct riscv_image_header {
u32 res1;
u64 res2;
u64 magic;
-u32 res3;
+u32 magic2;
u32 res4;
};
#endif /* __ASSEMBLY__ */
......
......@@ -39,9 +39,9 @@ ENTRY(_start)
.word RISCV_HEADER_VERSION
.word 0
.dword 0
-.asciz RISCV_IMAGE_MAGIC
.word 0
+.ascii RISCV_IMAGE_MAGIC
+.balign 4
+.ascii RISCV_IMAGE_MAGIC2
.word 0
.global _start_kernel
......
......@@ -1961,6 +1961,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
case KVM_S390_MCHK:
irq->u.mchk.mcic = s390int->parm64;
break;
case KVM_S390_INT_PFAULT_INIT:
irq->u.ext.ext_params = s390int->parm;
irq->u.ext.ext_params2 = s390int->parm64;
break;
case KVM_S390_RESTART:
case KVM_S390_INT_CLOCK_COMP:
case KVM_S390_INT_CPU_TIMER:
break;
default:
return -EINVAL;
}
return 0;
}
......
......@@ -1018,6 +1018,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
/* mark all the pages in active slots as dirty */
for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
ms = slots->memslots + slotnr;
if (!ms->dirty_bitmap)
return -EINVAL;
/*
* The second half of the bitmap is only used on x86,
* and would be wasted otherwise, so we put it to good
......@@ -4323,7 +4325,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
case KVM_S390_INTERRUPT: {
struct kvm_s390_interrupt s390int;
-struct kvm_s390_irq s390irq;
+struct kvm_s390_irq s390irq = {};
if (copy_from_user(&s390int, argp, sizeof(s390int)))
return -EFAULT;
......
......@@ -336,25 +336,28 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
{
long err;
if (!IS_ENABLED(CONFIG_SYSVIPC))
return -ENOSYS;
/* No need for backward compatibility. We can start fresh... */
if (call <= SEMTIMEDOP) {
switch (call) {
case SEMOP:
-err = sys_semtimedop(first, ptr,
-(unsigned int)second, NULL);
+err = ksys_semtimedop(first, ptr,
+(unsigned int)second, NULL);
goto out;
case SEMTIMEDOP:
-err = sys_semtimedop(first, ptr, (unsigned int)second,
+err = ksys_semtimedop(first, ptr, (unsigned int)second,
(const struct __kernel_timespec __user *)
(unsigned long) fifth);
goto out;
case SEMGET:
-err = sys_semget(first, (int)second, (int)third);
+err = ksys_semget(first, (int)second, (int)third);
goto out;
case SEMCTL: {
-err = sys_semctl(first, second,
-(int)third | IPC_64,
-(unsigned long) ptr);
+err = ksys_old_semctl(first, second,
+(int)third | IPC_64,
+(unsigned long) ptr);
goto out;
}
default:
......@@ -365,18 +368,18 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
if (call <= MSGCTL) {
switch (call) {
case MSGSND:
-err = sys_msgsnd(first, ptr, (size_t)second,
+err = ksys_msgsnd(first, ptr, (size_t)second,
(int)third);
goto out;
case MSGRCV:
-err = sys_msgrcv(first, ptr, (size_t)second, fifth,
+err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
(int)third);
goto out;
case MSGGET:
-err = sys_msgget((key_t)first, (int)second);
+err = ksys_msgget((key_t)first, (int)second);
goto out;
case MSGCTL:
-err = sys_msgctl(first, (int)second | IPC_64, ptr);
+err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
......@@ -396,13 +399,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
goto out;
}
case SHMDT:
-err = sys_shmdt(ptr);
+err = ksys_shmdt(ptr);
goto out;
case SHMGET:
-err = sys_shmget(first, (size_t)second, (int)third);
+err = ksys_shmget(first, (size_t)second, (int)third);
goto out;
case SHMCTL:
-err = sys_shmctl(first, (int)second | IPC_64, ptr);
+err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
goto out;
default:
err = -ENOSYS;
......
......@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
* Lower 12 bits encode the number of additional
* pages to flush (in addition to the 'cur' page).
*/
-if (diff >= HV_TLB_FLUSH_UNIT)
+if (diff >= HV_TLB_FLUSH_UNIT) {
gva_list[gva_n] |= ~PAGE_MASK;
-else if (diff)
+cur += HV_TLB_FLUSH_UNIT;
+} else if (diff) {
gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+cur = end;
+}
-cur += HV_TLB_FLUSH_UNIT;
gva_n++;
} while (cur < end);
......
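The comment in the hunk above defines the flush-list encoding: each 64-bit entry carries a page-aligned GVA in its upper bits and, in its low 12 bits, the count of additional pages to flush after the first, so one entry covers at most 4096 pages (one HV_TLB_FLUSH_UNIT). A standalone sketch of that encoding with illustrative names (not the driver's API):

#include <stdint.h>

#define DEMO_PAGE_SHIFT	12
#define DEMO_PAGE_SIZE	(1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK	(~(DEMO_PAGE_SIZE - 1))
/* One list entry can describe at most 4096 pages (low 12 bits all set). */
#define DEMO_FLUSH_UNIT	(4096UL * DEMO_PAGE_SIZE)

/*
 * Encode a flush request for "npages" pages starting at "start" into one
 * 64-bit entry: page-aligned address plus (npages - 1) in the low 12 bits.
 * Callers must keep npages between 1 and 4096.
 */
static uint64_t demo_encode_gva(uint64_t start, uint64_t npages)
{
	return (start & DEMO_PAGE_MASK) | (npages - 1);
}

The reworked loop above advances cur by a full flush unit only in the full-entry branch and jumps straight to end for a partial final entry, so the loop terminates without stepping past end.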
......@@ -70,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
BOOT_PARAM_PRESERVE(eddbuf_entries),
BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
BOOT_PARAM_PRESERVE(secure_boot),
BOOT_PARAM_PRESERVE(hdr),
BOOT_PARAM_PRESERVE(e820_table),
BOOT_PARAM_PRESERVE(eddbuf),
......
......@@ -335,6 +335,7 @@ struct kvm_mmu_page {
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
unsigned long mmu_valid_gen;
DECLARE_BITMAP(unsync_child_bitmap, 512);
#ifdef CONFIG_X86_32
......@@ -856,6 +857,7 @@ struct kvm_arch {
unsigned long n_requested_mmu_pages;
unsigned long n_max_mmu_pages;
unsigned int indirect_shadow_pages;
unsigned long mmu_valid_gen;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
......
......@@ -444,8 +444,10 @@ __pu_label: \
({ \
int __gu_err; \
__inttype(*(ptr)) __gu_val; \
__typeof__(ptr) __gu_ptr = (ptr); \
__typeof__(size) __gu_size = (size); \
__uaccess_begin_nospec(); \
-__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
+__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
__uaccess_end(); \
(x) = (__force __typeof__(*(ptr)))__gu_val; \
__builtin_expect(__gu_err, 0); \
......
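The uaccess hunk above captures ptr and size in the temporaries __gu_ptr and __gu_size before use, so each macro argument is evaluated exactly once. As a generic illustration of why single evaluation matters for function-like macros (plain C, not kernel code):

#include <stdio.h>

/* Naive macro: "a" may be evaluated twice (condition and result). */
#define MAX_BAD(a, b)	((a) > (b) ? (a) : (b))

/* Single-evaluation variant: capture both arguments in typed locals. */
#define MAX_OK(a, b) ({			\
	__typeof__(a) __a = (a);	\
	__typeof__(b) __b = (b);	\
	__a > __b ? __a : __b;		\
})

int main(void)
{
	int i = 10, j = 10;

	int bad = MAX_BAD(i++, 5);	/* i++ runs twice: bad == 11, i == 12 */
	int ok  = MAX_OK(j++, 5);	/* j++ runs once:  ok  == 10, j == 11 */

	printf("bad=%d i=%d ok=%d j=%d\n", bad, i, ok, j);
	return 0;
}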
......@@ -834,6 +834,10 @@ bool __init apic_needs_pit(void)
if (!boot_cpu_has(X86_FEATURE_APIC))
return true;
/* Virt guests may lack ARAT, but still have DEADLINE */
if (!boot_cpu_has(X86_FEATURE_ARAT))
return true;
/* Deadline timer is based on TSC so no further PIT action required */
if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
return false;
......@@ -1179,10 +1183,6 @@ void clear_local_APIC(void)
apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
v = apic_read(APIC_LVT1);
apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
-if (!x2apic_enabled()) {
-v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-apic_write(APIC_LDR, v);
-}
if (maxlvt >= 4) {
v = apic_read(APIC_LVTPC);
apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
......
......@@ -2095,6 +2095,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
if (!direct)
sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
/*
* active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
* depends on valid pages being added to the head of the list. See
* comments in kvm_zap_obsolete_pages().
*/
list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
kvm_mod_used_mmu_pages(vcpu->kvm, +1);
return sp;
......@@ -2244,7 +2250,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
#define for_each_valid_sp(_kvm, _sp, _gfn) \
hlist_for_each_entry(_sp, \
&(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-if ((_sp)->role.invalid) { \
+if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \
} else
#define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn) \
......@@ -2301,6 +2307,11 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
static void mmu_audit_disable(void) { }
#endif
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}
static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
......@@ -2525,6 +2536,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
if (level > PT_PAGE_TABLE_LEVEL && need_sync)
flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
clear_page(sp->spt);
trace_kvm_mmu_get_page(sp, true);
......@@ -4233,6 +4245,13 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
return false;
if (cached_root_available(vcpu, new_cr3, new_role)) {
/*
* It is possible that the cached previous root page is
* obsolete because of a change in the MMU generation
* number. However, changing the generation number is
* accompanied by KVM_REQ_MMU_RELOAD, which will free
* the root set here and allocate a new one.
*/
kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
if (!skip_tlb_flush) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
......@@ -5649,11 +5668,89 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
return alloc_mmu_pages(vcpu);
}
static void kvm_zap_obsolete_pages(struct kvm *kvm)
{
struct kvm_mmu_page *sp, *node;
LIST_HEAD(invalid_list);
int ign;
restart:
list_for_each_entry_safe_reverse(sp, node,
&kvm->arch.active_mmu_pages, link) {
/*
* No obsolete valid page exists before a newly created page
* since active_mmu_pages is a FIFO list.
*/
if (!is_obsolete_sp(kvm, sp))
break;
/*
* Do not repeatedly zap a root page to avoid unnecessary
* KVM_REQ_MMU_RELOAD, otherwise we may not be able to
* progress:
* vcpu 0 vcpu 1
* call vcpu_enter_guest():
* 1): handle KVM_REQ_MMU_RELOAD
* and require mmu-lock to
* load mmu
* repeat:
* 1): zap root page and
* send KVM_REQ_MMU_RELOAD
*
* 2): if (cond_resched_lock(mmu-lock))
*
* 2): hold mmu-lock and load mmu
*
* 3): see KVM_REQ_MMU_RELOAD bit
* on vcpu->requests is set
* then return 1 to call
* vcpu_enter_guest() again.
* goto repeat;
*
* Since we are reversely walking the list and the invalid
* list will be moved to the head, skip the invalid page
* can help us to avoid the infinity list walking.
*/
if (sp->role.invalid)
continue;
if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
kvm_mmu_commit_zap_page(kvm, &invalid_list);
cond_resched_lock(&kvm->mmu_lock);
goto restart;
}
if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
goto restart;
}
kvm_mmu_commit_zap_page(kvm, &invalid_list);
}
/*
* Fast invalidate all shadow pages and use lock-break technique
* to zap obsolete pages.
*
* It's required when memslot is being deleted or VM is being
* destroyed, in these cases, we should ensure that KVM MMU does
* not use any resource of the being-deleted slot or all slots
* after calling the function.
*/
static void kvm_mmu_zap_all_fast(struct kvm *kvm)
{
spin_lock(&kvm->mmu_lock);
kvm->arch.mmu_valid_gen++;
kvm_zap_obsolete_pages(kvm);
spin_unlock(&kvm->mmu_lock);
}
static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot,
struct kvm_page_track_notifier_node *node)
{
-kvm_mmu_zap_all(kvm);
+kvm_mmu_zap_all_fast(kvm);
}
void kvm_mmu_init_vm(struct kvm *kvm)
......
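The MMU hunks above implement generation-based fast invalidation: bumping kvm->arch.mmu_valid_gen makes every existing shadow page obsolete, and kvm_zap_obsolete_pages() walks the FIFO active_mmu_pages list from the oldest end, stopping at the first page created after the bump. A much-simplified, single-threaded sketch of the same pattern with toy data structures (all names are illustrative):

#include <stdbool.h>
#include <stddef.h>

/* Toy "shadow page": tagged with the generation it was created in. */
struct toy_page {
	unsigned long	gen;
	struct toy_page	*prev;	/* towards newer pages */
	struct toy_page	*next;	/* towards older pages */
};

struct toy_mmu {
	unsigned long	valid_gen;	/* current generation */
	struct toy_page	*head;		/* newest page */
	struct toy_page	*tail;		/* oldest page */
};

static bool toy_is_obsolete(struct toy_mmu *mmu, struct toy_page *p)
{
	return p->gen != mmu->valid_gen;
}

/*
 * Fast invalidation: bump the generation, then reap from the oldest end.
 * Because pages are added at the head in creation order, the walk can stop
 * at the first non-obsolete page - everything newer is valid by construction.
 */
static void toy_zap_obsolete(struct toy_mmu *mmu, void (*zap)(struct toy_page *))
{
	mmu->valid_gen++;

	while (mmu->tail && toy_is_obsolete(mmu, mmu->tail)) {
		struct toy_page *victim = mmu->tail;

		mmu->tail = victim->prev;
		if (mmu->tail)
			mmu->tail->next = NULL;
		else
			mmu->head = NULL;
		zap(victim);
	}
}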
......@@ -4540,6 +4540,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
int len;
gva_t gva = 0;
struct vmcs12 *vmcs12;
struct x86_exception e;
short offset;
if (!nested_vmx_check_permission(vcpu))
......@@ -4588,7 +4589,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
vmx_instruction_info, true, len, &gva))
return 1;
/* _system ok, nested_vmx_check_permission has verified cpl=0 */
-kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
+if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+kvm_inject_page_fault(vcpu, &e);
}