/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
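		/* 14 is the x86 page-fault exception vector: */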
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs)) || (regs->cs == __USER_CS);
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
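	/* For a hardware-poison SIGBUS (BUS_MCEERR_AR), si_addr_lsb reports page-sized granularity: */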
	info.si_addr_lsb = si_code == BUS_MCEERR_AR ? PAGE_SHIFT : 0;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
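/*
 * Copy the kernel mapping for 'address' from the reference page table
 * (init_mm.pgd) into the given pgd; returns the kernel pmd, or NULL if
 * the reference entry is not present:
 */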
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {

		unsigned long flags;
		struct page *page;

		spin_lock_irqsave(&pgd_lock, flags);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			int ret;

			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
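	/* screen_bitmap has one bit per 4K page of the 0xA0000-0xBFFFF VGA window: */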
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

static const char errata93_warning[] =
KERN_ERR 
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
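	/* Return non-zero if the page-table entry at p cannot safely be read: */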
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
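	/* The low bits of CR3 hold flags (PWT/PCD), not address bits: */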
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
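	/* With the upper 32 bits (cleared by the erratum) restored, check for a valid kernel text or module address: */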
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
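	/* Selector bit 2 is the TI bit: a set bit means the CS selector references the LDT. */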
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;
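		/* IDT descriptors are 8 bytes; entry 6 is the #UD (invalid opcode) vector: */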

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch, fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & VM_FAULT_HWPOISON) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk);
}

static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fault & VM_FAULT_OOM) {
		out_of_memory(regs, error_code, address);
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
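	/*
	 * Return 1 if the current page-table entry already permits the
	 * faulting access, i.e. the fault came from a stale TLB entry:
	 */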
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	/*
	 * Note: don't use pte_present() here, since it returns true
	 * if the _PAGE_PROTNONE bit is set.  However, this aliases the
	 * _PAGE_GLOBAL bit, which for kernel pages gives false positives
	 * when CONFIG_DEBUG_PAGEALLOC is used.
	 */
	pte = pte_offset_kernel(pmd, address);
	if (!(pte_flags(*pte) & _PAGE_PRESENT))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
{
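	/*
	 * Return non-zero if the vma's protections do not allow the
	 * attempted access (read, write or execute):
	 */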
	if (write) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
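	/* TASK_SIZE_MAX is the top of the user address range: */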
	return address >= TASK_SIZE_MAX;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int write;
	int fault;

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * ((error_code & PF_USER) == 0), and that the fault was not a
	 * protection or reserved-bit error ((error_code & (PF_PROT | PF_RSVD)) == 0).
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	write = error_code & PF_WRITE;

	if (unlikely(access_error(error_code, write, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);

	if (unlikely(fault & VM_FAULT_ERROR)) {
		mm_fault_error(regs, error_code, address, fault);
		return;
	}

	if (fault & VM_FAULT_MAJOR) {
		tsk->maj_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
				     regs, address);
	} else {
		tsk->min_flt++;
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
				     regs, address);
	}

	check_v8086_mode(regs, address, tsk);

	up_read(&mm->mmap_sem);
}