/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_START		*/

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};
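/*
 * For example, a user-mode write to a not-present page arrives with
 * error_code == (PF_USER | PF_WRITE) == 6; if the page was present
 * but read-only, PF_PROT is set as well and error_code == 7.
 */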

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

static inline int __kprobes notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
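 *   For example, the byte sequence 3e 0f 18 (a DS segment override
 *   followed by the two-byte prefetch escape 0f 18) walks through the
 *   decoder below as: valid prefix, then the 0x0F map, then a
 *   prefetch opcode.
 *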
 * Opcode checker based on code by Richard Brunner.
 */
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
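	/*
	 * For a hardware-poison SIGBUS, si_addr_lsb tells user space the
	 * granularity of the poisoned mapping: a whole huge page for
	 * VM_FAULT_HWPOISON_LARGE, otherwise a single page.
	 */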
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
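	/* Each of the 32 bits covers one page of the 0xA0000-0xBFFFF VGA window: */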
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd))
		set_pgd(pgd, *pgd_ref);
	else
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64-bit RIP register on C stepping K8.
 *
 * A lot of BIOSes that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32 bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
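	/*
	 * Restore the clobbered upper 32 bits and see whether the result
	 * now points into kernel text or module space:
	 */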
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
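	/* Selector bit 2 is the TI bit: set means the code segment is in the LDT. */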
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
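	 *
	 * The lockup sequence f0 0f c7 c8 (LOCK CMPXCHG8B with a register
	 * operand, an invalid encoding) would normally hang the CPU.  With
	 * the workaround in place the IDT is mapped read-only, so the
	 * locked IDT access faults and lands here instead.  Descriptors
	 * are 8 bytes (hence the ">> 3"), and entry 6 is the invalid-opcode
	 * vector, which is then delivered by hand below.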
	 */
	if (boot_cpu_data.f00f_bug) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;

		pte_t *pte = lookup_address(address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, current_uid());
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip, 1);

	dump_pagetable(address);
}

static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		if (current_thread_info()->sig_on_uaccess_error && signal) {
			tsk->thread.trap_no = 14;
			tsk->thread.error_code = error_code | PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address, tsk, 0);
		}
		return;
	}

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_no	= 14;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_EMERG "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_START))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif

		if (unlikely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		/* Kernel addresses are always protection faults: */
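		/*
		 * (address >= TASK_SIZE) evaluates to 0 or 1, so the OR
		 * below sets PF_PROT exactly for kernel addresses.
		 */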
		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no	= 14;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

/* TODO: fixup for "mm-invoke-oom-killer-from-page-fault.patch" */
static void
out_of_memory(struct pt_regs *regs, unsigned long error_code,
	      unsigned long address)
{
	/*
	 * We ran out of memory, call the OOM killer, and return to userspace
	 * (which will retry the fault, or kill us if we got oom-killed):
	 */
	up_read(&current->mm->mmap_sem);

	pagefault_out_of_memory();
}

static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_no	= 14;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

static noinline int
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	/*
	 * The page fault was interrupted by SIGKILL. We have no reason to
	 * continue handling it.
	 */
	if (fatal_signal_pending(current)) {
		if (!(fault & VM_FAULT_RETRY))
			up_read(&current->mm->mmap_sem);
		if (!(error_code & PF_USER))
			no_context(regs, error_code, address, 0, 0);
		return 1;
	}
	if (!(fault & VM_FAULT_ERROR))
		return 0;

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return 1;
		}

		out_of_memory(regs, error_code, address);
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
	return 1;
}

static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
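 *
 * For instance, making a read-only kernel page writable updates the
 * kernel PTE, but with lazy flushing another CPU may still hold the
 * old read-only translation in its TLB; the first write there takes
 * a fault that the code below recognizes as spurious and drops.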
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	/*
	 * Note: don't use pte_present() here, since it returns true
	 * if the _PAGE_PROTNONE bit is set.  However, this aliases the
	 * _PAGE_GLOBAL bit, which for kernel pages give false positives
	 * when CONFIG_DEBUG_PAGEALLOC is used.
	 */
	pte = pte_offset_kernel(pmd, address);
	if (!(pte_flags(*pte) & _PAGE_PRESENT))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
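	/*
	 * E.g. a write fault (PF_WRITE) against a VMA lacking VM_WRITE,
	 * or a read of a present page that the VMA does not allow
	 * (PF_PROT), is an access error; the caller reports it to the
	 * task as SEGV_ACCERR.
	 */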
	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
dotraplinkage void __kprobes
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	unsigned long address;
	struct mm_struct *mm;
	int fault;
	int write = error_code & PF_WRITE;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
					(write ? FAULT_FLAG_WRITE : 0);

	tsk = current;
	mm = tsk->mm;

	/* Get the faulting address: */
	address = read_cr2();

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(notify_page_fault(regs)))
		return;
	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
		if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
			bad_area(regs, error_code, address);
			return;
		}
	}
	if (unlikely(expand_stack(vma, address))) {
		bad_area(regs, error_code, address);
		return;
	}

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	if (unlikely(access_error(error_code, vma))) {
		bad_area_access_error(regs, error_code, address);
		return;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault:
	 */
	fault = handle_mm_fault(mm, vma, address, flags);

	if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
		if (mm_fault_error(regs, error_code, address, fault))
			return;
	}

	/*
	 * Major/minor page fault accounting is only done on the
	 * initial attempt. If we go through a retry, it is extremely
	 * likely that the page will be found in page cache at that point.
	 */
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		if (fault & VM_FAULT_MAJOR) {
			tsk->maj_flt++;
			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,