diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0c7efb1de29de3cc575dbb7dee04d8f7163ec7d2..91f4f528815020b9f2c634e2217aaaad5f106055 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3305,21 +3305,18 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit
 			*exit_qual = has_payload ? payload : vcpu->arch.cr2;
 			return 1;
 		}
-	} else {
-		/*
-		 * FIXME: we must not write DR6 when L1 intercepts an
-		 * L2 #DB exception.
-		 */
-		if (vmcs12->exception_bitmap & (1u << nr)) {
-			if (nr == DB_VECTOR) {
-				*exit_qual = vcpu->arch.dr6;
-				*exit_qual &= ~(DR6_FIXED_1 | DR6_BT);
-				*exit_qual ^= DR6_RTM;
-			} else {
-				*exit_qual = 0;
+	} else if (vmcs12->exception_bitmap & (1u << nr)) {
+		if (nr == DB_VECTOR) {
+			if (!has_payload) {
+				payload = vcpu->arch.dr6;
+				payload &= ~(DR6_FIXED_1 | DR6_BT);
+				payload ^= DR6_RTM;
 			}
-			return 1;
-		}
+			*exit_qual = payload;
+		} else {
+			*exit_qual = 0;
+		}
+		return 1;
 	}
 
 	return 0;
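
The hunk above computes the exit qualification for a #DB intercepted by L1 from either the queued payload or, when no payload was queued, from the live DR6 value. As a reference, here is a minimal stand-alone sketch of that transform; the constants are assumed to match the kernel's definitions at the time of this patch (DR6_FIXED_1 = 0xfffe0ff0, DR6_BT = bit 15, DR6_RTM = bit 16), and dr6_to_exit_qual() is an illustrative name rather than a kernel symbol.

#include <stdio.h>

#define DR6_FIXED_1	0xfffe0ff0ul	/* DR6 bits that always read as 1 */
#define DR6_BT		(1ul << 15)	/* task-switch bit, masked out of the qual */
#define DR6_RTM		(1ul << 16)	/* active-low in DR6 */

/* Illustrative only: mirrors the !has_payload path in the hunk above. */
static unsigned long dr6_to_exit_qual(unsigned long dr6)
{
	unsigned long qual = dr6;

	qual &= ~(DR6_FIXED_1 | DR6_BT);	/* strip the always-1 bits and BT */
	qual ^= DR6_RTM;			/* convert active-low RTM to active-high */
	return qual;
}

int main(void)
{
	/* Single-step #DB outside an RTM region: DR6 = FIXED_1 | RTM | BS. */
	printf("%#lx\n", dr6_to_exit_qual(0xfffe0ff0ul | (1ul << 16) | (1ul << 14)));
	/* Prints 0x4000: only BS survives, and bit 16 ends up clear. */
	return 0;
}

The XOR is the key step: DR6.RTM is active-low (0 means the #DB hit inside an RTM transaction), while bit 16 of the exit qualification and of the payload is active-high, so the two encodings are one XOR apart.
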
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4e9536c335d6c4bc7f978e272e60e7a08188514a..bd4e402b2e79c889a0e6b953d8086ddd8d978a32 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -410,6 +410,28 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
 		return;
 
 	switch (nr) {
+	case DB_VECTOR:
+		/*
+		 * "Certain debug exceptions may clear bits 0-3.  The
+		 * remaining contents of the DR6 register are never
+		 * cleared by the processor".
+		 */
+		vcpu->arch.dr6 &= ~DR_TRAP_BITS;
+		/*
+		 * DR6.RTM is set by all #DB exceptions that don't clear it.
+		 */
+		vcpu->arch.dr6 |= DR6_RTM;
+		vcpu->arch.dr6 |= payload;
+		/*
+		 * Bit 16 should be set in the payload whenever the #DB
+		 * exception should clear DR6.RTM. This makes the payload
+		 * compatible with the 'pending debug exceptions' field under VMX.
+		 * Though not currently documented in the SDM, this also
+		 * makes the payload compatible with the exit qualification
+		 * for #DB exceptions under VMX.
+		 */
+		vcpu->arch.dr6 ^= payload & DR6_RTM;
+		break;
 	case PF_VECTOR:
 		vcpu->arch.cr2 = payload;
 		break;
@@ -464,11 +486,13 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 		/*
 		 * In guest mode, payload delivery should be deferred,
 		 * so that the L1 hypervisor can intercept #PF before
-		 * CR2 is modified.  However, for ABI compatibility
-		 * with KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS,
-		 * we can't delay payload delivery unless userspace
-		 * has enabled this functionality via the per-VM
-		 * capability, KVM_CAP_EXCEPTION_PAYLOAD.
+		 * CR2 is modified (or intercept #DB before DR6 is
+		 * modified under nVMX).  However, for ABI
+		 * compatibility with KVM_GET_VCPU_EVENTS and
+		 * KVM_SET_VCPU_EVENTS, we can't delay payload
+		 * delivery unless userspace has enabled this
+		 * functionality via the per-VM capability,
+		 * KVM_CAP_EXCEPTION_PAYLOAD.
 		 */
 		if (!vcpu->kvm->arch.exception_payload_enabled ||
 		    !is_guest_mode(vcpu))
@@ -518,6 +542,12 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
 }
 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
 
+static void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr,
+				  unsigned long payload)
+{
+	kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false);
+}
+
 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr,
 				    u32 error_code, unsigned long payload)
 {
@@ -6156,14 +6186,7 @@ static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r)
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
 		*r = EMULATE_USER_EXIT;
 	} else {
-		/*
-		 * "Certain debug exceptions may clear bit 0-3.  The
-		 * remaining contents of the DR6 register are never
-		 * cleared by the processor".
-		 */
-		vcpu->arch.dr6 &= ~15;
-		vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
-		kvm_queue_exception(vcpu, DB_VECTOR);
+		kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS);
 	}
 }
 
@@ -7102,10 +7125,22 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
 			__kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
 					     X86_EFLAGS_RF);
 
-		if (vcpu->arch.exception.nr == DB_VECTOR &&
-		    (vcpu->arch.dr7 & DR7_GD)) {
-			vcpu->arch.dr7 &= ~DR7_GD;
-			kvm_update_dr7(vcpu);
+		if (vcpu->arch.exception.nr == DB_VECTOR) {
+			/*
+			 * This code assumes that nSVM doesn't use
+			 * check_nested_events(). If it does, the
+			 * DR6/DR7 changes should happen before L1
+			 * gets a #VMEXIT for an intercepted #DB in
+			 * L2.  (Under VMX, on the other hand, the
+			 * DR6/DR7 changes should not happen in the
+			 * event of a VM-exit to L1 for an intercepted
+			 * #DB in L2.)
+			 */
+			kvm_deliver_exception_payload(vcpu);
+			if (vcpu->arch.dr7 & DR7_GD) {
+				vcpu->arch.dr7 &= ~DR7_GD;
+				kvm_update_dr7(vcpu);
+			}
 		}
 
 		kvm_x86_ops->queue_exception(vcpu);
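
On the x86.c side, kvm_deliver_exception_payload() folds the queued payload into the guest's DR6 only at delivery time, and kvm_vcpu_do_singlestep() now queues a bare DR6_BS payload instead of writing DR6 directly. A free-standing sketch of the delivery step, under the same assumptions about the constants (DR_TRAP_BITS = 0xf, DR6_BS = bit 14) and with deliver_db_payload() as a made-up name, not a kernel symbol:

#include <assert.h>

#define DR_TRAP_BITS	0xful		/* DR6.B0-B3 */
#define DR6_FIXED_1	0xfffe0ff0ul
#define DR6_BS		(1ul << 14)
#define DR6_RTM		(1ul << 16)

/* Illustrative only: mirrors the DB_VECTOR case added above. */
static unsigned long deliver_db_payload(unsigned long dr6, unsigned long payload)
{
	dr6 &= ~DR_TRAP_BITS;		/* "certain debug exceptions may clear bits 0-3" */
	dr6 |= DR6_RTM;			/* set by every #DB that doesn't clear it */
	dr6 |= payload;
	dr6 ^= payload & DR6_RTM;	/* payload bit 16 set => clear DR6.RTM */
	return dr6;
}

int main(void)
{
	/*
	 * Stale B0/B1 from an earlier #DB; single-step payload as queued
	 * by kvm_vcpu_do_singlestep().
	 */
	unsigned long dr6 = deliver_db_payload(DR6_FIXED_1 | 0x3, DR6_BS);

	assert(dr6 == (DR6_FIXED_1 | DR6_RTM | DR6_BS));

	/* #DB inside an RTM region: bit 16 in the payload clears DR6.RTM. */
	assert(!(deliver_db_payload(dr6, DR6_RTM) & DR6_RTM));
	return 0;
}

Together with the vmx.c sketch this shows the round trip: a payload with bit 16 set leaves DR6.RTM clear after delivery, the stale B0-B3 bits are dropped, and everything else in DR6 stays sticky, matching the SDM quote carried in the new comment.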