/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};
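
/*
 * Example (illustrative): a user-mode write to a present but read-only
 * page -- the classic copy-on-write case -- arrives here with
 * error_code == (PF_PROT | PF_WRITE | PF_USER).
 */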

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present, so
		 * X86_64 will never get here anyway.
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

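/*
 * Illustrative example: "prefetchnta (%eax)" encodes as 0f 18 00.
 * For the 0x0f byte, instr_hi is 0x00, so the 0x00 case above peeks
 * at the following byte (0x18) and sets *prefetch, which lets
 * is_prefetch() below report the fault as a prefetch.
 */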
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
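	/*
	 * For hardware-poison faults (BUS_MCEERR_AR) report the
	 * granularity of the poisoning: a whole page.
	 */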
	info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {

		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			if (!vmalloc_sync_one(page_address(page), address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

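	/*
	 * One bit per page of the legacy video range starting at
	 * 0xA0000 (32 pages, i.e. 0xA0000-0xBFFFF):
	 */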
	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	unsigned long address;

	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
	     address += PGDIR_SIZE) {

		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen due to a race in the page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

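	/* Re-set the upper 32 bits that the erratum cleared: */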
	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
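	/* Selector bit 2 is the TI bit: the CS came from the LDT. */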
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

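		/*
		 * With the workaround's read-only IDT mapping, the
		 * lockup becomes a fault on IDT entry 6 (#UD):
		 */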
		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (*stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER))
		no_context(regs, error_code, address);

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

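	/*
	 * Faults caused by hardware memory corruption (hwpoison)
	 * get a more specific si_code and a log message:
	 */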
#ifdef CONFIG_MEMORY_FAILURE
	if (fault & VM_FAULT_HWPOISON) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		out_of_memory(regs, error_code, address);
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
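/*
 * Example (illustrative): a kernel RO->RW change skips the global TLB
 * flush; another CPU with the stale RO entry still cached then takes a
 * write fault here, sees pte_write() already set, and simply retries
 * the access after we return.
 */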
static noinline int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
916 917
{
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
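
/*
 * Example: a user write to a PROT_READ-only VMA fails the VM_WRITE
 * check in access_error() and is delivered as SIGSEGV with
 * SEGV_ACCERR (via bad_area_access_error() above).
 */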

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int write;
	int fault;

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
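
	/*
	 * Prefetch mmap_sem's cacheline for write: we will most
	 * likely take the semaphore for reading shortly.
	 */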
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * ((error_code & PF_USER) == 0), and that the fault was not a
	 * protection error ((error_code & (PF_PROT | PF_RSVD)) == 0).
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;

	/*
1014 1015 1016 1017
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
Ingo Molnar's avatar
Ingo Molnar committed
1018
	 * potential system fault or CPU buglet:
1019
	 */
1020 1021 1022
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
Ingo Molnar's avatar
Ingo Molnar committed
1023 1024 1025 1026
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter