#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
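
/*
 * Usage sketch (illustrative, not part of the original header): with a
 * 4 KiB PAGE_SIZE, PAGE_ALIGN() rounds a length up to the next page
 * boundary and PAGE_ALIGNED() tests an address or pointer for alignment:
 *
 *	unsigned long len = PAGE_ALIGN(5000);	(yields 8192)
 *	bool aligned = PAGE_ALIGNED(0x2000);	(true)
 */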

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#else
#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
#endif

/*
 * Special vmas that are non-mergeable, non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask..
 */
extern pgprot_t protection_map[16];
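
/*
 * Illustrative sketch of how the table is indexed (this mirrors what
 * mm/mmap.c does when turning vm_flags into a pte protection): the low
 * four flag bits select one of the sixteen precomputed entries:
 *
 *	pgprot_t prot = protection_map[vm_flags &
 *			(VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 */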

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x40	/* second try */
#define FAULT_FLAG_USER		0x80	/* The fault originated in userspace */

/*
 * vm_fault is filled by the pagefault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* for ->map_pages() only */
	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
					 * max_pgoff inclusive */
	pte_t *pte;			/* pte entry associated with ->pgoff */
};
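
/*
 * Illustrative sketch of a minimal ->fault handler (my_fault and my_dev
 * are hypothetical names): it validates vmf->pgoff, stores a referenced
 * page in vmf->page and returns 0, or returns a VM_FAULT_* code:
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vma->vm_private_data;
 *
 *		if (vmf->pgoff >= dev->nr_pages)
 *			return VM_FAULT_SIGBUS;
 *		vmf->page = dev->pages[vmf->pgoff];
 *		get_page(vmf->page);
 *		return 0;
 *	}
 */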

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs. 
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable, if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);
#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
		const nodemask_t *to, unsigned long flags);
#endif
	/* called by sys_remap_file_pages() to populate non-linear mapping */
	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
			   unsigned long size, pgoff_t pgoff);
};
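
/*
 * Illustrative sketch (hypothetical names): a driver usually defines a
 * static ops table and installs it from its ->mmap() method; the core VM
 * then invokes the handlers above on the vma:
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_ops = &my_vm_ops;
 *		return 0;
 *	}
 */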

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

/* It's valid only while the page is in the free path or on a free_list */
static inline void set_freepage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}

/* It's valid only while the page is in the free path or on a free_list */
static inline int get_freepage_migratetype(struct page *page)
{
	return page->index;
}

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero, return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}
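
/*
 * Usage sketch (illustrative): lockless code that found a page via a
 * speculative lookup must not touch it unless it can take a reference,
 * because the page may be freed and reused at any moment:
 *
 *	if (!get_page_unless_zero(page))
 *		goto repeat;	(lost the race with the final put_page())
 *	... inspect the page ...
 *	put_page(page);
 */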

/*
 * Try to drop a ref unless the page has a refcount of one, return false if
 * that is the case.
 * This is to make sure that the refcount won't become zero after this drop.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int put_page_unless_one(struct page *page)
{
	return atomic_add_unless(&page->_count, -1, 1);
}

extern int page_is_ram(unsigned long pfn);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void kvfree(const void *addr);
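
/*
 * Sketch of the intended kvfree() pattern (illustrative): a buffer that
 * may have come from either kmalloc() or vmalloc() can be released with
 * a single call, so the caller need not remember which allocator won:
 *
 *	void *buf = kmalloc(size, GFP_KERNEL);
 *	if (!buf)
 *		buf = vmalloc(size);
 *	...
 *	kvfree(buf);
 */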

static inline void compound_lock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(PageSlab(page), page);
	bit_spin_lock(PG_compound_lock, &page->flags);
#endif
}

static inline void compound_unlock(struct page *page)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	VM_BUG_ON_PAGE(PageSlab(page), page);
	bit_spin_unlock(PG_compound_lock, &page->flags);
#endif
}

static inline unsigned long compound_lock_irqsave(struct page *page)
{
	unsigned long uninitialized_var(flags);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	local_irq_save(flags);
	compound_lock(page);
#endif
	return flags;
}

static inline void compound_unlock_irqrestore(struct page *page,
					      unsigned long flags)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	compound_unlock(page);
	local_irq_restore(flags);
#endif
}

static inline struct page *compound_head(struct page *page)
{
	if (unlikely(PageTail(page))) {
		struct page *head = page->first_page;

		/*
		 * page->first_page may be a dangling pointer to an old
		 * compound page, so recheck that it is still a tail
		 * page before returning.
		 */
		smp_rmb();
		if (likely(PageTail(page)))
			return head;
	}
	return page;
}

/*
 * The atomic page->_mapcount, starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

static inline int page_mapcount(struct page *page)
{
	return atomic_read(&(page)->_mapcount) + 1;
}

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

#ifdef CONFIG_HUGETLB_PAGE
extern int PageHeadHuge(struct page *page_head);
#else /* CONFIG_HUGETLB_PAGE */
static inline int PageHeadHuge(struct page *page_head)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */

static inline bool __compound_tail_refcounted(struct page *page)
{
	return !PageSlab(page) && !PageHeadHuge(page);
}

/*
 * This takes a head page as parameter and tells if the
 * tail page reference counting can be skipped.
 *
 * For this to be safe, PageSlab and PageHeadHuge must remain true on
 * any given page where they return true here, until all tail pins
 * have been released.
 */
static inline bool compound_tail_refcounted(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	return __compound_tail_refcounted(page);
}

static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run from under us.
	 */
	VM_BUG_ON_PAGE(!PageTail(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
	if (compound_tail_refcounted(page->first_page))
		atomic_inc(&page->_mapcount);
}

extern bool __get_page_tail(struct page *page);

static inline void get_page(struct page *page)
{
	if (unlikely(PageTail(page)))
		if (likely(__get_page_tail(page)))
			return;
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_count.
	 */
	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
	atomic_inc(&page->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);
	return compound_head(page);
}

/*
 * Set up the page count before the page is freed into the page allocator for
 * the first time (boot or memory hotplug)
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

/*
 * PageBuddy() indicates that the page is free and in the buddy system
 * (see mm/page_alloc.c).
 *
 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 * -2 so that an underflow of the page_mapcount() won't be mistaken
 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 * efficiently by most CPU architectures.
 */
#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

static inline int PageBuddy(struct page *page)
{
	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
}

static inline void __SetPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
}

static inline void __ClearPageBuddy(struct page *page)
{
	VM_BUG_ON_PAGE(!PageBuddy(page), page);
	atomic_set(&page->_mapcount, -1);
}

void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a PG_compound page.
 */
typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	return (compound_page_dtor *)page[1].lru.next;
}

static inline int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return (unsigned long)page[1].lru.prev;
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}
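
/*
 * Usage sketch (illustrative): __GFP_COMP asks the allocator to build a
 * compound page, whose order can later be recovered from the head page:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_COMP, 2);
 *	if (page) {
 *		int order = compound_order(page);	(order == 2)
 *		__free_pages(page, order);
 *	}
 */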

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
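
/*
 * Sketch of the typical fault-path pattern (compare mm/memory.c): build
 * the pte from the vma's default protection, then let maybe_mkwrite()
 * decide whether the hardware write bit may be set:
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 */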

void do_set_pte(struct vm_area_struct *vma, unsigned long address,
		struct page *page, pte_t *pte, bool write, bool anon);
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away reference to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone since we could be using the section number id if we do not have
 * node id available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;

	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}
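
/*
 * Usage sketch (illustrative): the helpers above decode everything
 * needed to place a page within the memory layout:
 *
 *	int nid = page_to_nid(page);		(NUMA node id)
 *	enum zone_type zt = page_zonenum(page);	(e.g. ZONE_NORMAL)
 *	struct zone *zone = page_zone(page);
 */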

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif

/*
 * On an anonymous page mapped into a user virtual memory area,
 * page->mapping points to its anon_vma, not to a struct address_space;
 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 *
 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 * and then page->mapping points, not to an anon_vma, but to a private
 * structure which KSM associates with that merged page.  See ksm.h.
 *
 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
#define PAGE_MAPPING_ANON	1
#define PAGE_MAPPING_KSM	2
#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)

extern struct address_space *page_mapping(struct page *page);

/* Neutral page->mapping pointer to address_space or anon_vma or other */
static inline void *page_rmapping(struct page *page)
{
	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
}

extern struct address_space *__page_file_mapping(struct page *);

static inline
struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);

	return page->mapping;
}

static inline int PageAnon(struct page *page)
{
	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}

/*
 * Return the pagecache index of the passed page.  Regular pagecache pages
 * use ->index whereas swapcache pages use ->private
 */
static inline pgoff_t page_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return page_private(page);
	return page->index;
}

extern pgoff_t __page_file_index(struct page *page);

/*
 * Return the file index of the page. Regular pagecache pages use ->index
 * whereas swapcache pages use swp_offset(->private)
 */
static inline pgoff_t page_file_index(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_index(page);

	return page->index;
}

/*
 * Return true if this page is mapped into pagetables.
 */
static inline int page_mapped(struct page *page)
{
	return atomic_read(&(page)->_mapcount) >= 0;
}

/*
 * Different kinds of faults, as returned by handle_mm_fault().
 * Used to decide whether a process gets delivered SIGBUS or
 * just gets major/minor fault counters bumped up.
 */

#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */

#define VM_FAULT_OOM	0x0001
#define VM_FAULT_SIGBUS	0x0002
#define VM_FAULT_MAJOR	0x0004
#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */

#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */

#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)

/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
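
/*
 * Sketch of the encode/decode round trip (illustrative; hugetlb is the
 * real user of these macros): a fault on a poisoned huge page packs the
 * hstate index into the return value, and the arch fault handler later
 * unpacks it to size the affected range:
 *
 *	ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hindex);
 *	...
 *	hindex = VM_FAULT_GET_HINDEX(ret);
 */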

/*
 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
 */
extern void pagefault_out_of_memory(void);

#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

/*
 * Flags passed to show_mem() and show_free_areas() to suppress output in
 * various contexts.
 */
#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */

extern void show_free_areas(unsigned int flags);
extern bool skip_free_areas_node(unsigned int flags, int nid);

int shmem_zero_setup(struct vm_area_struct *);
#ifdef CONFIG_SHMEM
bool shmem_mapping(struct address_space *mapping);
#else
static inline bool shmem_mapping(struct address_space *mapping)
{
	return false;
}
#endif

extern int can_do_mlock(void);
extern int user_shm_lock(size_t, struct user_struct *);
extern void user_shm_unlock(size_t, struct user_struct *);

/*
 * Parameter block passed down to zap_pte_range in exceptional cases.
 */
struct zap_details {
	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
	struct address_space *check_mapping;	/* Check page->mapping if set */
	pgoff_t	first_index;			/* Lowest page->index to unmap */
	pgoff_t last_index;			/* Highest page->index to unmap */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
		pte_t pte);

int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
		unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
		unsigned long size, struct zap_details *);
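
/*
 * Usage sketch (illustrative): a NULL zap_details unmaps every page in
 * the range; a populated zap_details restricts the zap, e.g. to a
 * particular mapping or page->index window:
 *
 *	zap_page_range(vma, vma->vm_start,
 *		       vma->vm_end - vma->vm_start, NULL);
 */
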
void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
		unsigned long start, unsigned long end);

/**
 * mm_walk - callbacks for walk_page_range
 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
 *	       this handler is required to be able to handle
 *	       pmd_trans_huge() pmds.  They may simply choose to
 *	       split_huge_page() instead of handling it explicitly.
 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
 * @pte_hole: if set, called for each hole at all levels
 * @hugetlb_entry: if set, called for each hugetlb entry
 *		   *Caution*: The caller must hold mmap_sem() if @hugetlb_entry
 * 			      is used.
 *
 * (see walk_page_range for more details)
 */
struct mm_walk {
	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pud_entry)(pud_t *pud, unsigned long addr,
	                 unsigned long next, struct mm_walk *walk);
	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_entry)(pte_t *pte, unsigned long addr,
			 unsigned long next, struct mm_walk *walk);
	int (*pte_hole)(unsigned long addr, unsigned long next,
			struct mm_walk *walk);
	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
			     unsigned long addr, unsigned long next,
			     struct mm_walk *walk);
	struct mm_struct *mm;
	void *private;
};

int walk_page_range(unsigned long addr, unsigned long end,
		struct mm_walk *walk);
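
/*
 * Usage sketch (illustrative, hypothetical callback): count the present
 * ptes in a range; the caller is expected to hold mmap_sem:
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)walk->private)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= mm,
 *		.private	= &count,
 *	};
 *	down_read(&mm->mmap_sem);
 *	walk_page_range(start, end, &walk);
 *	up_read(&mm->mmap_sem);
 */
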
void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
		unsigned long end, unsigned long floor, unsigned long ceiling);
int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
			struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
		unsigned int flags, unsigned long *prot, resource_size_t *phys);
int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
			void *buf, int len, int write);

static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
{
	unmap_mapping_range(mapping, holebegin, holelen, 0);
}

extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long address, unsigned int fault_flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long address,
			unsigned int flags)
{
	/* should never happen if there's no MMU */
	BUG();
	return VM_FAULT_SIGBUS;
}
static inline int fixup_user_fault(struct task_struct *tsk,
		struct mm_struct *mm, unsigned long address,