/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* NOKPROBE_SYMBOL, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/
#include <linux/uaccess.h>		/* faulthandler_disabled()	*/

#include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_ADDR		*/
#include <asm/vsyscall.h>		/* emulate_vsyscall		*/
#include <asm/vm86.h>			/* struct vm86			*/
#include <asm/mmu_context.h>		/* vma_pkey()			*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 *   bit 5 ==				1: protection keys block access
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
	PF_PK		=		1 << 5,
};
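
/*
 * For example, an error code of PF_PROT|PF_WRITE|PF_USER (0x7) describes
 * a user-mode write to a present page whose protections forbid the
 * access, e.g. a write to a read-only mapping.
 */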

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static nokprobe_inline int
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static nokprobe_inline int kprobes_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode(regs)) {
		preempt_disable();
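		/* 14 is the page-fault exception vector (X86_TRAP_PF): */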
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
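	/* x86 instructions can be at most 15 bytes long: */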
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE_MAX)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

/*
 * A protection key fault means that the PKRU value did not allow
 * access to some PTE.  Userspace can figure out what PKRU was
 * from the XSAVE state, and this function fills out a field in
 * siginfo so userspace can discover which protection key was set
 * on the PTE.
 *
 * If we get here, we know that the hardware signaled a PF_PK
 * fault and that there was a VMA once we got in the fault
 * handler.  It does *not* guarantee that the VMA we find here
 * was the one that we faulted on.
 *
 * 1. T1   : mprotect_key(foo, PAGE_SIZE, pkey=4);
 * 2. T1   : set PKRU to deny access to pkey=4, touches page
 * 3. T1   : faults...
 * 4.    T2: mprotect_key(foo, PAGE_SIZE, pkey=5);
 * 5. T1   : enters fault handler, takes mmap_sem, etc...
 * 6. T1   : reaches here, sees vma_pkey(vma)=5, when we really
 *	     faulted on a pte with its pkey=4.
 */
static void fill_sig_info_pkey(int si_code, siginfo_t *info,
		struct vm_area_struct *vma)
{
	/* This is effectively an #ifdef */
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	/* Fault not from Protection Keys: nothing to do */
	if (si_code != SEGV_PKUERR)
		return;
	/*
	 * force_sig_info_fault() is called from a number of
	 * contexts, some of which have a VMA and some of which
	 * do not.  The PF_PK handling happens after we have a
	 * valid VMA, so we should never reach this without a
	 * valid VMA.
	 */
	if (!vma) {
		WARN_ONCE(1, "PKU fault with no VMA passed in");
		info->si_pkey = 0;
		return;
	}
	/*
	 * si_pkey should be thought of as a strong hint, but not
	 * absolutely guaranteed to be 100% accurate because of
	 * the race explained above.
	 */
	info->si_pkey = vma_pkey(vma);
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, struct vm_area_struct *vma,
		     int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
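	/*
	 * For hwpoison (memory-failure) faults, si_addr_lsb tells
	 * userspace the granularity of the poisoning: the whole huge
	 * page for _LARGE faults, otherwise a single base page.
	 */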
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	fill_sig_info_pkey(si_code, &info, vma);

	force_sig_info(si_signo, &info, tsk);
}

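/* All pgds are chained on pgd_list so the kernel mappings can be synced: */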
DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock is only needed on Xen: */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
#ifdef CONFIG_VM86
	unsigned long bit;

	if (!v8086_mode(regs) || !tsk->thread.vm86)
		return;

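	/* Each bit covers one 4k page of the legacy 0xA0000 VGA window: */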
	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.vm86->screen_bitmap |= 1 << bit;
#endif
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END, 0);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd)) {
		set_pgd(pgd, *pgd_ref);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
	}

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}
NOKPROBE_SYMBOL(vmalloc_fault);

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */
/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

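	/* Re-create the probable full address by setting the upper 32 bits: */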
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
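	/* Bit 2 of %cs is the table indicator: set means an LDT selector. */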
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
static const char smep_warning[] = KERN_CRIT
"unable to execute userspace code (SMEP?) (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
		if (pte && pte_present(*pte) && pte_exec(*pte) &&
				(pgd_flags(*pgd) & _PAGE_USER) &&
				(__read_cr4() & X86_CR4_SMEP))
			printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long flags;
	int sig;
	/* No context means no VMA to pass down */
	struct vm_area_struct *vma = NULL;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current_thread_info()->sig_on_uaccess_error && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address,
					     tsk, vma, 0);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	if (task_stack_end_corrupted(tsk))
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, struct vm_area_struct *vma,
		       int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_ADDR))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif
		/* Kernel addresses are always protection faults: */
		if (address >= TASK_SIZE)
			error_code |= PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, vma, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address, struct vm_area_struct *vma)
{
	__bad_area_nosemaphore(regs, error_code, address, vma, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, struct vm_area_struct *vma, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, vma, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, NULL, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address, struct vm_area_struct *vma)
{
	/*
	 * This OSPKE check is not strictly necessary at runtime.
	 * But, doing it this way allows compiler optimizations
	 * if pkeys are compiled out.
	 */
	if (boot_cpu_has(X86_FEATURE_OSPKE) && (error_code & PF_PK))
		__bad_area(regs, error_code, address, vma, SEGV_PKUERR);
	else
		__bad_area(regs, error_code, address, vma, SEGV_ACCERR);
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  struct vm_area_struct *vma, unsigned int fault)
{
	struct task_struct *tsk = current;
	int code = BUS_ADRERR;

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, vma, fault);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, struct vm_area_struct *vma,
	       unsigned int fault)
{
	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we got
		 * oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, vma, fault);
		else if (fault & VM_FAULT_SIGSEGV)
			bad_area_nosemaphore(regs, error_code, address, vma);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;
	/*
	 * Note: We do not do lazy flushing on protection key
	 * changes, so no spurious fault will ever set PF_PK.