/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_START		*/
#include <asm/context_tracking.h>	/* exception_enter(), ...	*/

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};
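
/*
 * For illustration (this follows directly from the bit definitions above):
 * a user-mode write to a not-present page arrives with
 * error_code == (PF_USER | PF_WRITE) == 0x6, while a kernel-mode
 * instruction fetch from a present but NX page arrives with
 * error_code == (PF_PROT | PF_INSTR) == 0x11.
 */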

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}
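
/* A -1 return means mmiotrace consumed the fault and the caller can bail out. */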

static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}
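
/*
 * Contract with the scan loop in is_prefetch() below: a non-zero return
 * means "this byte is an instruction prefix, keep decoding"; a zero return
 * ends the scan, with *prefetch set only when the opcode turned out to be
 * 0F 0D or 0F 18 (the prefetch encodings).
 */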

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	force_sig_info(si_signo, &info, tsk);
}
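
/*
 * Note on si_addr_lsb: for hwpoison (memory failure) SIGBUS signals it tells
 * userspace the granularity of the poisoned region around si_addr -
 * PAGE_SHIFT for a normal page, the huge page shift for a huge page.
 */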

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}
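
/*
 * Note: this only repairs the top-level entry from the reference page
 * tables; if the lower levels are missing, the mapping genuinely does not
 * exist, so -1 tells the caller the fault was not a stale vmalloc mapping
 * and normal fault handling continues.
 */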

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}
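
/*
 * In other words: when the workaround fires, the truncated RIP is
 * sign-extended back into the kernel text/module range, regs->ip is
 * rewritten, and the faulting instruction is simply retried.
 */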

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}
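
/*
 * regs->cs == __USER32_CS catches the fixed 32-bit GDT segment; selector
 * bit 2 set means the code segment came from the LDT, which per the
 * comment above is only used for compatibility-mode code here.
 */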

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}
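
/*
 * Background (not visible in this file): the classic F00F mitigation maps
 * the IDT read-only so the lockup becomes a page fault on the IDT.  Each
 * 32-bit IDT descriptor is 8 bytes, hence the >> 3; entry 6 is the invalid
 * opcode vector, so the fault is redirected to do_invalid_op().
 */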

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		if (current_thread_info()->sig_on_uaccess_error && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address, tsk, 0);
		}
		return;
	}

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_START))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}
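
/*
 * The "| (address >= TASK_SIZE)" above ORs in 1 (== PF_PROT) whenever a
 * user process faulted on a kernel address, so the error code recorded
 * for such faults always reads as a protection fault.
 */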

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	/*
	 * Pagefault was interrupted by SIGKILL. We have no reason to
	 * continue pagefault.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!(error_code & PF_USER))
			no_context(regs, error_code, address, 0, 0);
		return 1;
	}
	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return 1;
		}

		out_of_memory(regs, error_code, address);
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
	return 1;
}
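
/*
 * Return contract above: 1 means the error has been dealt with (signal
 * sent, OOM path taken, or no_context() invoked); 0 means the fault
 * completed without a VM_FAULT_ERROR and normal processing continues.
 */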

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}
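
/*
 * Returns 1 when the access that faulted is already permitted by the
 * current entry, i.e. the fault can only have come from a stale TLB entry.
 */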

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	/*
	 * Note: don't use pte_present() here, since it returns true
	 * if the _PAGE_PROTNONE bit is set.  However, this aliases the
	 * _PAGE_GLOBAL bit, which for kernel pages give false positives
	 * when CONFIG_DEBUG_PAGEALLOC is used.
	 */
	pte = pte_offset_kernel(pmd, address);
	if (!(pte_flags(*pte) & _PAGE_PRESENT))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}
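
/*
 * Typical scenario: a kernel page's permissions are raised (say RO -> RW)
 * without a global TLB flush; a CPU still holding the old read-only
 * translation then faults on a write, walks the tables above, sees the
 * access is now allowed, and simply returns with the stale entry refreshed.
 */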

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}
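
/*
 * Examples of the checks above: a write to a read-only VMA returns 1
 * (access error), a read that faulted with PF_PROT on a present page
 * returns 1, and a read from a VMA with no read/write/exec rights at all
 * returns 1; everything else is a legitimate access that just needs the
 * page populated.
 */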

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (error_code & PF_USER)
		return false;

	if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	return true;
}
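
/*
 * SMAP background: with Supervisor Mode Access Prevention enabled, a
 * kernel-mode dereference of a user address faults unless EFLAGS.AC was
 * set around the access.  User-mode faults and kernel faults taken with
 * AC set are therefore not treated as violations here.
 */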

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
static void __kprobes
__do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int fault;
	int write = error_code & PF_WRITE;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
					(write ? FAULT_FLAG_WRITE : 0);