/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
Linus Torvalds's avatar
Linus Torvalds committed
156
157
158
159
160
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {

		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			if (!vmalloc_sync_one(page_address(page), address))
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
Akinobu Mita's avatar
Akinobu Mita committed
303
304
305
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	unsigned long address;

	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
	     address += PGDIR_SIZE) {

		const pgd_t *pgd_ref = pgd_offset_k(address);
		unsigned long flags;
		struct page *page;

		if (pgd_none(*pgd_ref))
			continue;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			if (pgd_none(*pgd))
				set_pgd(pgd, *pgd_ref);
			else
				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

static const char errata93_warning[] =
KERN_ERR 
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return the userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & VM_FAULT_HWPOISON) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		out_of_memory(regs, error_code, address);
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
{
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int write;
	int fault;

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	write = error_code & PF_WRITE;

	if (unlikely(access_error(error_code, write, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				     regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				     regs, address);
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}