/*
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2001, 2002 Andi Kleen, SuSE Labs.
 *  Copyright (C) 2008-2009, Red Hat Inc., Ingo Molnar
 */
#include <linux/magic.h>		/* STACK_END_MAGIC		*/
#include <linux/sched.h>		/* test_thread_flag(), ...	*/
#include <linux/kdebug.h>		/* oops_begin/end, ...		*/
#include <linux/module.h>		/* search_exception_table	*/
#include <linux/bootmem.h>		/* max_low_pfn			*/
#include <linux/kprobes.h>		/* __kprobes, ...		*/
#include <linux/mmiotrace.h>		/* kmmio_handler, ...		*/
#include <linux/perf_event.h>		/* perf_sw_event		*/
#include <linux/hugetlb.h>		/* hstate_index_to_shift	*/
#include <linux/prefetch.h>		/* prefetchw			*/
#include <linux/context_tracking.h>	/* exception_enter(), ...	*/

#include <asm/traps.h>			/* dotraplinkage, ...		*/
#include <asm/pgalloc.h>		/* pgd_*(), ...			*/
#include <asm/kmemcheck.h>		/* kmemcheck_*(), ...		*/
#include <asm/fixmap.h>			/* VSYSCALL_START		*/

#define CREATE_TRACE_POINTS
#include <asm/trace/exceptions.h>

/*
 * Page fault error code bits:
 *
 *   bit 0 ==	 0: no page found	1: protection fault
 *   bit 1 ==	 0: read access		1: write access
 *   bit 2 ==	 0: kernel-mode access	1: user-mode access
 *   bit 3 ==				1: use of reserved bit detected
 *   bit 4 ==				1: fault was an instruction fetch
 */
enum x86_pf_error_code {

	PF_PROT		=		1 << 0,
	PF_WRITE	=		1 << 1,
	PF_USER		=		1 << 2,
	PF_RSVD		=		1 << 3,
	PF_INSTR	=		1 << 4,
};

/*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
 */
static inline int __kprobes
kmmio_fault(struct pt_regs *regs, unsigned long addr)
{
	if (unlikely(is_kmmio_active()))
		if (kmmio_handler(regs, addr) == 1)
			return -1;
	return 0;
}

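/*
 * Returns 1 if the fault was taken while a kprobe was active and the
 * registered kprobe fault handler claimed it, 0 otherwise.
 */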
static inline int __kprobes kprobes_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (kprobes_built_in() && !user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}

	return ret;
}

/*
 * Prefetch quirks:
 *
 * 32-bit mode:
 *
 *   Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * 64-bit mode:
 *
 *   Sometimes the CPU reports invalid exceptions on prefetch.
 *   Check that here and ignore it.
 *
 * Opcode checker based on code by Richard Brunner.
 */
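/*
 * Returns non-zero if 'opcode' is an instruction prefix, i.e. the opcode
 * scan in is_prefetch() should keep going.  For the 0x0F escape byte the
 * next byte is examined: *prefetch is set if it encodes a prefetch
 * instruction (0x0F 0x0D or 0x0F 0x18), and the scan stops.
 */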
static inline int
check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
		      unsigned char opcode, int *prefetch)
{
	unsigned char instr_hi = opcode & 0xf0;
	unsigned char instr_lo = opcode & 0x0f;

	switch (instr_hi) {
	case 0x20:
	case 0x30:
		/*
		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
		 * In X86_64 long mode, the CPU will signal invalid
		 * opcode if some of these prefixes are present so
		 * X86_64 will never get here anyway
		 */
		return ((instr_lo & 7) == 0x6);
#ifdef CONFIG_X86_64
	case 0x40:
		/*
		 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
		 * Need to figure out under what instruction mode the
		 * instruction was issued. Could check the LDT for lm,
		 * but for now it's good enough to assume that long
		 * mode only uses well known segments or kernel.
		 */
		return (!user_mode(regs) || user_64bit_mode(regs));
#endif
	case 0x60:
		/* 0x64 thru 0x67 are valid prefixes in all modes. */
		return (instr_lo & 0xC) == 0x4;
	case 0xF0:
		/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
		return !instr_lo || (instr_lo>>1) == 1;
	case 0x00:
		/* Prefetch instruction is 0x0F0D or 0x0F18 */
		if (probe_kernel_address(instr, opcode))
			return 0;

		*prefetch = (instr_lo == 0xF) &&
			(opcode == 0x0D || opcode == 0x18);
		return 0;
	default:
		return 0;
	}
}

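/*
 * Scan up to 15 opcode bytes at the faulting instruction pointer and
 * return non-zero if the instruction decodes as a prefetch (opcode
 * 0x0F 0x0D or 0x0F 0x18), whose bogus fault can simply be ignored.
 */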
static int
is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
{
	unsigned char *max_instr;
	unsigned char *instr;
	int prefetch = 0;

	/*
	 * If it was an exec (instruction fetch) fault on an NX page, then
	 * do not ignore the fault:
	 */
	if (error_code & PF_INSTR)
		return 0;

	instr = (void *)convert_ip_to_linear(current, regs);
	max_instr = instr + 15;

	if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
		return 0;

	while (instr < max_instr) {
		unsigned char opcode;

		if (probe_kernel_address(instr, opcode))
			break;

		instr++;

		if (!check_prefetch_opcode(regs, instr, opcode, &prefetch))
			break;
	}
	return prefetch;
}

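/*
 * Fill in a siginfo and deliver the signal to the faulting task.  For
 * memory-failure (hwpoison) faults, si_addr_lsb carries the log2
 * granularity of the poisoned region: PAGE_SHIFT, or the huge page
 * shift for a huge page.
 */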
static void
force_sig_info_fault(int si_signo, int si_code, unsigned long address,
		     struct task_struct *tsk, int fault)
{
	unsigned lsb = 0;
	siginfo_t info;

	info.si_signo	= si_signo;
	info.si_errno	= 0;
	info.si_code	= si_code;
	info.si_addr	= (void __user *)address;
	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;
	info.si_addr_lsb = lsb;

	force_sig_info(si_signo, &info, tsk);
}

DEFINE_SPINLOCK(pgd_lock);
LIST_HEAD(pgd_list);

#ifdef CONFIG_X86_32
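/*
 * Copy the PMD entry covering 'address' from the reference page table
 * (init_mm.pgd) into the given pgd.  Returns the reference PMD, or NULL
 * if the address is not mapped in the reference table.
 */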
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	if (!pmd_present(*pmd))
		set_pmd(pmd, *pmd_k);
	else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));

	return pmd_k;
}

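/*
 * Walk the vmalloc area one PMD at a time and propagate the kernel
 * mappings into every pgd on pgd_list, so that all processes see
 * newly created vmalloc mappings.
 */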
void vmalloc_sync_all(void)
{
	unsigned long address;

	if (SHARED_KERNEL_PMD)
		return;

	for (address = VMALLOC_START & PMD_MASK;
	     address >= TASK_SIZE && address < FIXADDR_TOP;
	     address += PMD_SIZE) {
		struct page *page;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			spinlock_t *pgt_lock;
			pmd_t *ret;

			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;

			spin_lock(pgt_lock);
			ret = vmalloc_sync_one(page_address(page), address);
			spin_unlock(pgt_lock);

			if (!ret)
				break;
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * 32-bit:
 *
 *   Handle a fault on the vmalloc or module mapping area
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;

	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;

	return 0;
}

/*
 * Did it hit the DOS screen memory VA from vm86 mode?
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
	unsigned long bit;

	if (!v8086_mode(regs))
		return;

	bit = (address - 0xA0000) >> PAGE_SHIFT;
	if (bit < 32)
		tsk->thread.screen_bitmap |= 1 << bit;
}

static bool low_pfn(unsigned long pfn)
{
	return pfn < max_low_pfn;
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(address)];
	pmd_t *pmd;
	pte_t *pte;

#ifdef CONFIG_X86_PAE
	printk("*pdpt = %016Lx ", pgd_val(*pgd));
	if (!low_pfn(pgd_val(*pgd) >> PAGE_SHIFT) || !pgd_present(*pgd))
		goto out;
#endif
	pmd = pmd_offset(pud_offset(pgd, address), address);
	printk(KERN_CONT "*pde = %0*Lx ", sizeof(*pmd) * 2, (u64)pmd_val(*pmd));

	/*
	 * We must not directly access the pte in the highpte
	 * case if the page table is located in highmem.
	 * And let's rather not kmap-atomic the pte, just in case
	 * it's allocated already:
	 */
	if (!low_pfn(pmd_pfn(*pmd)) || !pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	printk("*pte = %0*Lx ", sizeof(*pte) * 2, (u64)pte_val(*pte));
out:
	printk("\n");
}

#else /* CONFIG_X86_64: */

void vmalloc_sync_all(void)
{
	sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
}

/*
 * 64-bit:
 *
 *   Handle a fault on the vmalloc area
 *
 * This assumes no large pages in there.
 */
static noinline __kprobes int vmalloc_fault(unsigned long address)
{
	pgd_t *pgd, *pgd_ref;
	pud_t *pud, *pud_ref;
	pmd_t *pmd, *pmd_ref;
	pte_t *pte, *pte_ref;

	/* Make sure we are in vmalloc area: */
	if (!(address >= VMALLOC_START && address < VMALLOC_END))
		return -1;

	WARN_ON_ONCE(in_nmi());

	/*
	 * Copy kernel mappings over when needed. This can also
	 * happen within a race in page table update. In the latter
	 * case just flush:
	 */
	pgd = pgd_offset(current->active_mm, address);
	pgd_ref = pgd_offset_k(address);
	if (pgd_none(*pgd_ref))
		return -1;

	if (pgd_none(*pgd)) {
		set_pgd(pgd, *pgd_ref);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
	}

	/*
	 * Below here mismatches are bugs because these lower tables
	 * are shared:
	 */

	pud = pud_offset(pgd, address);
	pud_ref = pud_offset(pgd_ref, address);
	if (pud_none(*pud_ref))
		return -1;

	if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
		BUG();

	pmd = pmd_offset(pud, address);
	pmd_ref = pmd_offset(pud_ref, address);
	if (pmd_none(*pmd_ref))
		return -1;

	if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
		BUG();

	pte_ref = pte_offset_kernel(pmd_ref, address);
	if (!pte_present(*pte_ref))
		return -1;

	pte = pte_offset_kernel(pmd, address);

	/*
	 * Don't use pte_page here, because the mappings can point
	 * outside mem_map, and the NUMA hash lookup cannot handle
	 * that:
	 */
	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
		BUG();

	return 0;
}

#ifdef CONFIG_CPU_SUP_AMD
static const char errata93_warning[] =
KERN_ERR
"******* Your BIOS seems to not contain a fix for K8 errata #93\n"
"******* Working around it, but it may cause SEGVs or burn power.\n"
"******* Please consider a BIOS update.\n"
"******* Disabling USB legacy in the BIOS may also help.\n";
#endif

/*
 * No vm86 mode in 64-bit mode:
 */
static inline void
check_v8086_mode(struct pt_regs *regs, unsigned long address,
		 struct task_struct *tsk)
{
}

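/*
 * Returns non-zero if the page-table entry at 'p' cannot be read safely,
 * i.e. it does not point into mapped kernel memory.
 */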
static int bad_address(void *p)
{
	unsigned long dummy;

	return probe_kernel_address((unsigned long *)p, dummy);
}

static void dump_pagetable(unsigned long address)
{
	pgd_t *base = __va(read_cr3() & PHYSICAL_PAGE_MASK);
	pgd_t *pgd = base + pgd_index(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (bad_address(pgd))
		goto bad;

	printk("PGD %lx ", pgd_val(*pgd));

	if (!pgd_present(*pgd))
		goto out;

	pud = pud_offset(pgd, address);
	if (bad_address(pud))
		goto bad;

	printk("PUD %lx ", pud_val(*pud));
	if (!pud_present(*pud) || pud_large(*pud))
		goto out;

	pmd = pmd_offset(pud, address);
	if (bad_address(pmd))
		goto bad;

	printk("PMD %lx ", pmd_val(*pmd));
	if (!pmd_present(*pmd) || pmd_large(*pmd))
		goto out;

	pte = pte_offset_kernel(pmd, address);
	if (bad_address(pte))
		goto bad;

	printk("PTE %lx", pte_val(*pte));
out:
	printk("\n");
	return;
bad:
	printk("BAD\n");
}

#endif /* CONFIG_X86_64 */

/*
 * Workaround for K8 erratum #93 & buggy BIOS.
 *
 * BIOS SMM functions are required to use a specific workaround
 * to avoid corruption of the 64bit RIP register on C stepping K8.
 *
 * A lot of BIOS that didn't get tested properly miss this.
 *
 * The OS sees this as a page fault with the upper 32bits of RIP cleared.
 * Try to work around it here.
 *
 * Note we only handle faults in kernel here.
 * Does nothing on 32-bit.
 */
static int is_errata93(struct pt_regs *regs, unsigned long address)
{
#if defined(CONFIG_X86_64) && defined(CONFIG_CPU_SUP_AMD)
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD
	    || boot_cpu_data.x86 != 0xf)
		return 0;

	if (address != regs->ip)
		return 0;

	if ((address >> 32) != 0)
		return 0;

	address |= 0xffffffffUL << 32;
	if ((address >= (u64)_stext && address <= (u64)_etext) ||
	    (address >= MODULES_VADDR && address <= MODULES_END)) {
		printk_once(errata93_warning);
		regs->ip = address;
		return 1;
	}
#endif
	return 0;
}

/*
 * Work around K8 erratum #100: K8 in compat mode occasionally jumps
 * to illegal addresses >4GB.
 *
 * We catch this in the page fault handler because these addresses
 * are not reachable. Just detect this case and return.  Any code
 * segment in LDT is compatibility mode.
 */
static int is_errata100(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_64
	if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
		return 1;
#endif
	return 0;
}

static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
{
#ifdef CONFIG_X86_F00F_BUG
	unsigned long nr;

	/*
	 * Pentium F0 0F C7 C8 bug workaround:
	 */
	if (boot_cpu_has_bug(X86_BUG_F00F)) {
		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return 1;
		}
	}
#endif
	return 0;
}

static const char nx_warning[] = KERN_CRIT
"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";

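/*
 * Print the "BUG: unable to handle kernel ..." oops header, warn if the
 * fault was an instruction fetch from an NX-protected page, and dump
 * the page-table entries for the faulting address.
 */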
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	if (!oops_may_print())
		return;

	if (error_code & PF_INSTR) {
		unsigned int level;
		pgd_t *pgd;
		pte_t *pte;

		pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
		pgd += pgd_index(address);

		pte = lookup_address_in_pgd(pgd, address, &level);

		if (pte && pte_present(*pte) && !pte_exec(*pte))
			printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
	}

	printk(KERN_ALERT "BUG: unable to handle kernel ");
	if (address < PAGE_SIZE)
		printk(KERN_CONT "NULL pointer dereference");
	else
		printk(KERN_CONT "paging request");

	printk(KERN_CONT " at %p\n", (void *) address);
	printk(KERN_ALERT "IP:");
	printk_address(regs->ip);

	dump_pagetable(address);
}

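/*
 * A reserved bit was found set in a page-table entry (PF_RSVD): dump
 * the corrupted page table and kill the offender with an oops.
 */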
static noinline void
pgtable_bad(struct pt_regs *regs, unsigned long error_code,
	    unsigned long address)
{
	struct task_struct *tsk;
	unsigned long flags;
	int sig;

	flags = oops_begin();
	tsk = current;
	sig = SIGKILL;

	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
	       tsk->comm, address);
	dump_pagetable(address);

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	if (__die("Bad pagetable", regs, error_code))
		sig = 0;

	oops_end(flags, regs, sig);
}

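/*
 * No user-space recovery is possible: try the kernel exception tables
 * and the known CPU/BIOS errata first, otherwise print an oops and
 * kill the current task.
 */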
static noinline void
no_context(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int signal, int si_code)
{
	struct task_struct *tsk = current;
	unsigned long *stackend;
	unsigned long flags;
	int sig;

	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs)) {
		/*
		 * Any interrupt that takes a fault gets the fixup. This makes
		 * the below recursive fault logic only apply to faults from
		 * task context.
		 */
		if (in_interrupt())
			return;

		/*
		 * Per the above we're !in_interrupt(), aka. task context.
		 *
		 * In this case we need to make sure we're not recursively
		 * faulting through the emulate_vsyscall() logic.
		 */
		if (current_thread_info()->sig_on_uaccess_error && signal) {
			tsk->thread.trap_nr = X86_TRAP_PF;
			tsk->thread.error_code = error_code | PF_USER;
			tsk->thread.cr2 = address;

			/* XXX: hwpoison faults will set the wrong code. */
			force_sig_info_fault(signal, si_code, address, tsk, 0);
		}

		/*
		 * Barring that, we can do the fixup and be happy.
		 */
		return;
	}

	/*
	 * 32-bit:
	 *
	 *   Valid to do another page fault here, because if this fault
	 *   had been triggered by is_prefetch fixup_exception would have
	 *   handled it.
	 *
	 * 64-bit:
	 *
	 *   Hall of shame of CPU/BIOS bugs.
	 */
	if (is_prefetch(regs, error_code, address))
		return;

	if (is_errata93(regs, address))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice:
	 */
	flags = oops_begin();

	show_fault_oops(regs, error_code, address);

	stackend = end_of_stack(tsk);
	if (tsk != &init_task && *stackend != STACK_END_MAGIC)
		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");

	tsk->thread.cr2		= address;
	tsk->thread.trap_nr	= X86_TRAP_PF;
	tsk->thread.error_code	= error_code;

	sig = SIGKILL;
	if (__die("Oops", regs, error_code))
		sig = 0;

	/* Executive summary in case the body of the oops scrolled away */
	printk(KERN_DEFAULT "CR2: %016lx\n", address);

	oops_end(flags, regs, sig);
}

/*
 * Print out info about fatal segfaults, if the show_unhandled_signals
 * sysctl is set:
 */
static inline void
show_signal_msg(struct pt_regs *regs, unsigned long error_code,
		unsigned long address, struct task_struct *tsk)
{
	if (!unhandled_signal(tsk, SIGSEGV))
		return;

	if (!printk_ratelimit())
		return;

	printk("%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
		tsk->comm, task_pid_nr(tsk), address,
		(void *)regs->ip, (void *)regs->sp, error_code);

	print_vma_addr(KERN_CONT " in ", regs->ip);

	printk(KERN_CONT "\n");
}

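/*
 * Handle a fault with no usable VMA, without mmap_sem held: user-mode
 * accesses get a SIGSEGV, kernel-mode accesses are passed on to
 * no_context().
 */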
static void
__bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		       unsigned long address, int si_code)
{
	struct task_struct *tsk = current;

	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here:
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space:
		 */
		if (is_prefetch(regs, error_code, address))
			return;

		if (is_errata100(regs, address))
			return;

#ifdef CONFIG_X86_64
		/*
		 * Instruction fetch faults in the vsyscall page might need
		 * emulation.
		 */
		if (unlikely((error_code & PF_INSTR) &&
			     ((address & ~0xfff) == VSYSCALL_START))) {
			if (emulate_vsyscall(regs, address))
				return;
		}
#endif
		/* Kernel addresses are always protection faults: */
		if (address >= TASK_SIZE)
			error_code |= PF_PROT;

		if (likely(show_unhandled_signals))
			show_signal_msg(regs, error_code, address, tsk);

		tsk->thread.cr2		= address;
		tsk->thread.error_code	= error_code;
		tsk->thread.trap_nr	= X86_TRAP_PF;

		force_sig_info_fault(SIGSEGV, si_code, address, tsk, 0);

		return;
	}

	if (is_f00f_bug(regs, address))
		return;

	no_context(regs, error_code, address, SIGSEGV, si_code);
}

static noinline void
bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
		     unsigned long address)
{
	__bad_area_nosemaphore(regs, error_code, address, SEGV_MAPERR);
}

static void
__bad_area(struct pt_regs *regs, unsigned long error_code,
	   unsigned long address, int si_code)
{
	struct mm_struct *mm = current->mm;

	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
	up_read(&mm->mmap_sem);

	__bad_area_nosemaphore(regs, error_code, address, si_code);
}

static noinline void
bad_area(struct pt_regs *regs, unsigned long error_code, unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_MAPERR);
}

static noinline void
bad_area_access_error(struct pt_regs *regs, unsigned long error_code,
		      unsigned long address)
{
	__bad_area(regs, error_code, address, SEGV_ACCERR);
}

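/*
 * Release mmap_sem and deliver SIGBUS for a fault the VM could not
 * service.  Memory-failure (hwpoison) faults are reported with
 * BUS_MCEERR_AR so user space can distinguish them.
 */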
static void
do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
	  unsigned int fault)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	int code = BUS_ADRERR;

	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die: */
	if (!(error_code & PF_USER)) {
		no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
		return;
	}

	/* User-space => ok to do another page fault: */
	if (is_prefetch(regs, error_code, address))
		return;

	tsk->thread.cr2		= address;
	tsk->thread.error_code	= error_code;
	tsk->thread.trap_nr	= X86_TRAP_PF;

#ifdef CONFIG_MEMORY_FAILURE
	if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
		printk(KERN_ERR
	"MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
			tsk->comm, tsk->pid, address);
		code = BUS_MCEERR_AR;
	}
#endif
	force_sig_info_fault(SIGBUS, code, address, tsk, fault);
}

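/*
 * handle_mm_fault() reported an error: pick between OOM handling,
 * SIGBUS delivery and a kernel oops depending on the fault code.
 */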
static noinline void
mm_fault_error(struct pt_regs *regs, unsigned long error_code,
	       unsigned long address, unsigned int fault)
{
	if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
		up_read(&current->mm->mmap_sem);
		no_context(regs, error_code, address, 0, 0);
		return;
	}

	if (fault & VM_FAULT_OOM) {
		/* Kernel mode? Handle exceptions or die: */
		if (!(error_code & PF_USER)) {
			up_read(&current->mm->mmap_sem);
			no_context(regs, error_code, address,
				   SIGSEGV, SEGV_MAPERR);
			return;
		}

		up_read(&current->mm->mmap_sem);

		/*
		 * We ran out of memory, call the OOM killer, and return to
		 * userspace (which will retry the fault, or kill us if we
		 * got oom-killed):
		 */
		pagefault_out_of_memory();
	} else {
		if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
			     VM_FAULT_HWPOISON_LARGE))
			do_sigbus(regs, error_code, address, fault);
		else
			BUG();
	}
}

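/*
 * Returns 1 if the access described by error_code is permitted by the
 * given (possibly huge) page-table entry, 0 if the entry lacks write or
 * execute permission for it.
 */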
static int spurious_fault_check(unsigned long error_code, pte_t *pte)
{
	if ((error_code & PF_WRITE) && !pte_write(*pte))
		return 0;

	if ((error_code & PF_INSTR) && !pte_exec(*pte))
		return 0;

	return 1;
}

/*
 * Handle a spurious fault caused by a stale TLB entry.
 *
 * This allows us to lazily refresh the TLB when increasing the
 * permissions of a kernel page (RO -> RW or NX -> X).  Doing it
 * eagerly is very expensive since that implies doing a full
 * cross-processor TLB flush, even if no stale TLB entries exist
 * on other processors.
 *
 * There are no security implications to leaving a stale TLB when
 * increasing the permissions on a page.
 */
static noinline __kprobes int
spurious_fault(unsigned long error_code, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Reserved-bit violation or user access to kernel space? */
	if (error_code & (PF_USER | PF_RSVD))
		return 0;

	pgd = init_mm.pgd + pgd_index(address);
	if (!pgd_present(*pgd))
		return 0;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return 0;

	if (pud_large(*pud))
		return spurious_fault_check(error_code, (pte_t *) pud);

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return spurious_fault_check(error_code, (pte_t *) pmd);

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return 0;

	ret = spurious_fault_check(error_code, pte);
	if (!ret)
		return 0;

	/*
	 * Make sure we have permissions in PMD.
	 * If not, then there's a bug in the page tables:
	 */
	ret = spurious_fault_check(error_code, (pte_t *) pmd);
	WARN_ONCE(!ret, "PMD has incorrect permission bits\n");

	return ret;
}

int show_unhandled_signals = 1;

static inline int
access_error(unsigned long error_code, struct vm_area_struct *vma)
{
	if (error_code & PF_WRITE) {
		/* write, present and write, not present: */
		if (unlikely(!(vma->vm_flags & VM_WRITE)))
			return 1;
		return 0;
	}

	/* read, present: */
	if (unlikely(error_code & PF_PROT))
		return 1;

	/* read, not present: */
	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
		return 1;

	return 0;
}

static int fault_in_kernel_space(unsigned long address)
{
	return address >= TASK_SIZE_MAX;
}

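/*
 * A fault counts as a SMAP violation only when SMAP is configured and
 * supported by the CPU, the access came from kernel mode, and EFLAGS.AC
 * was clear (i.e. we were not inside a uaccess section).
 */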
static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
	if (!IS_ENABLED(CONFIG_X86_SMAP))
		return false;

	if (!static_cpu_has(X86_FEATURE_SMAP))
		return false;

	if (error_code & PF_USER)
		return false;

	if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
		return false;

	return true;
}

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * This function must have noinline because both callers
 * {,trace_}do_page_fault() have notrace on. Having this an actual function
 * guarantees there's a function trace entry.
 */
static void __kprobes noinline
__do_page_fault(struct pt_regs *regs, unsigned long error_code,
		unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	int fault;
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	tsk = current;
	mm = tsk->mm;

	/*
	 * Detect and handle instructions that would cause a page fault for
	 * both a tracked kernel page and a userspace page.
	 */
	if (kmemcheck_active(regs))
		kmemcheck_hide(regs);
	prefetchw(&mm->mmap_sem);

	if (unlikely(kmmio_fault(regs, address)))
		return;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
	if (unlikely(fault_in_kernel_space(address))) {
		if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
			if (vmalloc_fault(address) >= 0)
				return;

			if (kmemcheck_fault(regs, address, error_code))
				return;
		}

		/* Can handle a stale RO->RW TLB: */
		if (spurious_fault(error_code, address))
			return;

		/* kprobes don't want to hook the spurious faults: */
		if (kprobes_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock:
		 */
		bad_area_nosemaphore(regs, error_code, address);

		return;
	}

	/* kprobes don't want to hook the spurious faults: */
	if (unlikely(kprobes_fault(regs)))
		return;

	if (unlikely(error_code & PF_RSVD))
		pgtable_bad(regs, error_code, address);

	if (unlikely(smap_violation(error_code, regs))) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * If we're in an interrupt, have no user context or are running
	 * in an atomic region then we must not take the fault:
	 */
	if (unlikely(in_atomic() || !mm)) {
		bad_area_nosemaphore(regs, error_code, address);
		return;
	}

	/*
	 * It's safe to allow irq's after cr2 has been saved and the
	 * vmalloc fault has been handled.
	 *
	 * User-mode registers count as a user access even for any
	 * potential system fault or CPU buglet:
	 */
	if (user_mode_vm(regs)) {
		local_irq_enable();
		error_code |= PF_USER;
		flags |= FAULT_FLAG_USER;
	} else {
		if (regs->flags & X86_EFLAGS_IF)
			local_irq_enable();
	}

	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

	if (error_code & PF_WRITE)
		flags |= FAULT_FLAG_WRITE;

	/*
	 * When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in
	 * the kernel and should generate an OOPS.  Unfortunately, in the
	 * case of an erroneous fault occurring in a code path which already
	 * holds mmap_sem we will deadlock attempting to validate the fault
	 * against the address space.  Luckily the kernel only validly
	 * references user space from well defined areas of code, which are
	 * listed in the exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a
	 * deadlock. Attempt to lock the address space, if we cannot we then
	 * validate the source. If this is invalid we can skip the address
	 * space check, thus avoiding the deadlock:
	 */
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip)) {
			bad_area_nosemaphore(regs, error_code, address);
			return;
		}
retry:
		down_read(&mm->mmap_sem);
	} else {
		/*
		 * The above down_read_trylock() might have succeeded in
		 * which case we'll have missed the might_sleep() from
		 * down_read():
		 */
		might_sleep();
	}

	vma = find_vma(mm, address);
	if (unlikely(!vma)) {
		bad_area(regs, error_code, address);
		return;
	}
	if (likely(vma->vm_start <= address))
		goto good_area;
	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
		bad_area(regs, error_code, address);
		return;
	}
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535, $31" pushes