diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index c16314807756172e48c2d4926589356a132f7a04..6a1d60956d63dea658e14ef98e1c6a1fb55387ec 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -711,14 +711,18 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
 	int rc;
-	int size;
+	unsigned size;
 	unsigned long linear;
 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = ctxt->eip + cur_size };
 
-	size = min(15UL ^ cur_size,
-		   PAGE_SIZE - offset_in_page(addr.ea));
+	size = 15UL ^ cur_size;
+	rc = __linearize(ctxt, addr, size, false, true, &linear);
+	if (unlikely(rc != X86EMUL_CONTINUE))
+		return rc;
+
+	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 
 	/*
 	 * One instruction can only straddle two pages,
@@ -728,9 +732,6 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 */
 	if (unlikely(size < op_size))
 		return X86EMUL_UNHANDLEABLE;
-	rc = __linearize(ctxt, addr, size, false, true, &linear);
-	if (unlikely(rc != X86EMUL_CONTINUE))
-		return rc;
 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 			      size, &ctxt->exception);
 	if (unlikely(rc != X86EMUL_CONTINUE))
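
The visible effect of the hunk is that the page clamp is now computed from the linear address returned by __linearize(), not from the effective address ctxt->eip + cur_size, and the linearization happens before the clamp. When the CS base is not page-aligned the two offsets differ, and only the linear one matches the page that ->fetch() will actually read. A minimal standalone sketch of that difference, assuming a 4 KiB page and a simplified linearization (segment base plus effective address) in place of the kernel's __linearize():

/*
 * Standalone sketch (not kernel code) of the clamp above, assuming a 4 KiB
 * page and a simplified linearization (segment base + effective address)
 * standing in for the kernel's __linearize().
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define offset_in_page(p)	((unsigned long)(p) & (PAGE_SIZE - 1))

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long cs_base = 0x0ff0;		/* real-mode base of segment 0x00ff */
	unsigned long eip = 0x0005;		/* effective address, far from a page end */
	unsigned long cur_size = 0;		/* nothing fetched yet */

	unsigned long size = 15UL ^ cur_size;	/* equals 15 - cur_size for cur_size <= 15 */
	unsigned long linear = cs_base + eip;	/* 0x0ff5: only 11 bytes left in this page */

	/* Old clamp: the effective address suggests 15 bytes fit before the page end. */
	unsigned long old_clamp = min_ul(size, PAGE_SIZE - offset_in_page(eip));
	/* New clamp: the linear address says only 11 do, matching what ->fetch() reads. */
	unsigned long new_clamp = min_ul(size, PAGE_SIZE - offset_in_page(linear));

	printf("effective-address clamp: %lu, linear-address clamp: %lu\n",
	       old_clamp, new_clamp);
	return 0;
}

The reordering presumably also lets __linearize() validate the full remaining fetch length (15 ^ cur_size) against the segment before the page clamp shrinks it, but the sketch only illustrates the addr.ea -> linear switch inside offset_in_page().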