#ifndef _LINUX_MM_H
#define _LINUX_MM_H

#include <linux/errno.h>

#ifdef __KERNEL__

#include <linux/mmdebug.h>
#include <linux/gfp.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#include <linux/pfn.h>
#include <linux/percpu-refcount.h>
#include <linux/bit_spinlock.h>
#include <linux/shrinker.h>
#include <linux/resource.h>
#include <linux/page_ext.h>
#include <linux/err.h>

struct mempolicy;
struct anon_vma;
struct anon_vma_chain;
struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;

#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;

static inline void set_max_mapnr(unsigned long limit)
{
	max_mapnr = limit;
}
#else
static inline void set_max_mapnr(unsigned long limit) { }
#endif

extern unsigned long totalram_pages;
extern void * high_memory;
extern int page_cluster;

#ifdef CONFIG_SYSCTL
extern int sysctl_legacy_va_layout;
#else
#define sysctl_legacy_va_layout 0
#endif

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
extern const int mmap_rnd_bits_min;
extern const int mmap_rnd_bits_max;
extern int mmap_rnd_bits __read_mostly;
#endif
#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
extern const int mmap_rnd_compat_bits_min;
extern const int mmap_rnd_compat_bits_max;
extern int mmap_rnd_compat_bits __read_mostly;
#endif

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifndef __pa_symbol
#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
#endif

/*
 * To prevent the common memory management code from establishing a
 * zero-page mapping on a read fault, define mm_forbids_zeropage()
 * within <asm/pgtable.h>. s390 does this to prevent multiplexing of
 * hardware bits related to the physical page when running virtualized.
 */
#ifndef mm_forbids_zeropage
#define mm_forbids_zeropage(X)	(0)
#endif

/*
 * Default maximum number of active map areas; this limits the number of
 * VMAs per mm_struct. Users can override this limit via sysctl, but with
 * one caveat:
 *
 * When a program's coredump is generated in ELF format, one section is
 * created per VMA. The ELF header stores the number of sections in an
 * unsigned short, so it cannot exceed 65535 at coredump time. Because the
 * kernel adds a few informative sections to the program image while
 * generating the dump, we need some margin. The number of extra sections
 * is currently 1-3, depending on the architecture; we use 5 as a safe
 * margin here.
 *
 * ELF extended numbering allows more than 65535 sections, so the 16-bit
 * bound is no longer a hard limit, although some userspace tools can be
 * surprised by it.
 */
#define MAPCOUNT_ELF_CORE_MARGIN	(5)
#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)

extern int sysctl_max_map_count;
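
/*
 * Hedged usage sketch (not part of the original header): a mapping path
 * might consult the limit above before inserting a new VMA, roughly as
 * mm/mmap.c does; the helper name below is hypothetical.
 */
static inline bool __example_map_count_ok(struct mm_struct *mm)
{
	return mm->map_count < sysctl_max_map_count;
}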

extern unsigned long sysctl_user_reserve_kbytes;
extern unsigned long sysctl_admin_reserve_kbytes;

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;

extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);
extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
				    size_t *, loff_t *);

#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)

/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
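
/*
 * Illustrative compile-time checks of the two helpers above; a hypothetical
 * sketch assuming 4 KiB pages, shown only to make the rounding concrete.
 */
static inline void __page_align_example(void)
{
#if PAGE_SIZE == 4096
	BUILD_BUG_ON(PAGE_ALIGN(0x1001UL) != 0x2000UL);
	BUILD_BUG_ON(!PAGE_ALIGNED(0x2000UL));
#endif
}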

/*
 * Linux kernel virtual memory manager primitives.
 * The idea being to have a "virtual" mm in the same way
 * we have a virtual fs - giving a cleaner interface to the
 * mm details, and allowing different kinds of memory mappings
 * (from shared memory to executable loading to arbitrary
 * mmap() functions).
 */

extern struct kmem_cache *vm_area_cachep;

#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
extern struct rw_semaphore nommu_region_sem;

extern unsigned int kobjsize(const void *objp);
#endif

/*
 * vm_flags in vm_area_struct, see mm_types.h.
 */
#define VM_NONE		0x00000000

#define VM_READ		0x00000001	/* currently active flags */
#define VM_WRITE	0x00000002
#define VM_EXEC		0x00000004
#define VM_SHARED	0x00000008

/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
#define VM_MAYWRITE	0x00000020
#define VM_MAYEXEC	0x00000040
#define VM_MAYSHARE	0x00000080
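
/*
 * Hedged compile-time illustration of the ">> 4" relationship noted in the
 * mprotect() comment above; hypothetical helper, not part of this header.
 */
static inline void __vm_may_flags_example(void)
{
	BUILD_BUG_ON((VM_MAYREAD >> 4) != VM_READ);
	BUILD_BUG_ON((VM_MAYWRITE >> 4) != VM_WRITE);
	BUILD_BUG_ON((VM_MAYEXEC >> 4) != VM_EXEC);
}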

#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */

#define VM_LOCKED	0x00002000
#define VM_IO           0x00004000	/* Memory mapped I/O or similar */

					/* Used by sys_madvise() */
#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */

#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
#define VM_ARCH_2	0x02000000
#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */

#ifdef CONFIG_MEM_SOFT_DIRTY
# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
#else
# define VM_SOFTDIRTY	0
#endif

#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */

#if defined(CONFIG_X86)
# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_METAG)
# define VM_GROWSUP	VM_ARCH_1
#elif defined(CONFIG_IA64)
# define VM_GROWSUP	VM_ARCH_1
#elif !defined(CONFIG_MMU)
# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
#endif

#if defined(CONFIG_X86)
/* MPX specific bounds table or bounds directory */
# define VM_MPX		VM_ARCH_2
#endif

#ifndef VM_GROWSUP
# define VM_GROWSUP	VM_NONE
#endif

/* Bits set in the VMA until the stack is in its final location */
#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)

#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif

#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK	VM_GROWSUP
#else
#define VM_STACK	VM_GROWSDOWN
#endif

#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)

/*
 * Special VMAs that are non-mergeable and non-mlock()able.
 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 */
#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)

/* This mask defines which mm->def_flags a process can inherit from its parent */
#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE

/* This mask is used to clear all the VMA flags used by mlock */
#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))
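
/*
 * Hedged sketch: how an munlock-style path might strip both mlock flags at
 * once using the mask above. Hypothetical helper; the real users live in
 * mm/mlock.c.
 */
static inline void __example_clear_mlock_flags(struct vm_area_struct *vma)
{
	vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
}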

/*
 * mapping from the currently active vm_flags protection bits (the
 * low four bits) to a page protection mask.
 */
extern pgprot_t protection_map[16];
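
/*
 * Hedged sketch of how the table above is indexed: the low four vm_flags
 * bits select one of the 16 entries. This mirrors, but is not, the real
 * vm_get_page_prot() in mm/mmap.c.
 */
static inline pgprot_t __example_base_prot(unsigned long vm_flags)
{
	return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}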

#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
#define FAULT_FLAG_TRIED	0x20	/* Second try */
#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */

/*
 * vm_fault is filled in by the page fault handler and passed to the vma's
 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 *
 * The MM layer fills in gfp_mask for page allocations, but the fault handler
 * may alter it if its implementation requires a different allocation context.
 *
 * pgoff should be used in favour of virtual_address, if possible.
 */
struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
	pgoff_t pgoff;			/* Logical page offset based on vma */
	void __user *virtual_address;	/* Faulting virtual address */

	struct page *cow_page;		/* Handler may choose to COW */
	struct page *page;		/* ->fault handlers should return a
					 * page here, unless VM_FAULT_NOPAGE
					 * is set (which is also implied by
					 * VM_FAULT_ERROR).
					 */
	/* for ->map_pages() only */
	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
					 * max_pgoff inclusive */
	pte_t *pte;			/* pte entry associated with ->pgoff */
};
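
/*
 * Hedged sketch of a minimal ->fault implementation honouring the contract
 * above (hypothetical driver code; the VM_FAULT_* return codes are defined
 * further down in this header):
 *
 *	static int example_fault(struct vm_area_struct *vma,
 *				 struct vm_fault *vmf)
 *	{
 *		struct page *page = alloc_page(vmf->gfp_mask);
 *
 *		if (!page)
 *			return VM_FAULT_OOM;
 *		vmf->page = page;	(the alloc_page reference is handed over)
 *		return 0;
 *	}
 */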

/*
 * These are the virtual MM functions - opening of an area, closing and
 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 * to the functions called when a no-page or a wp-page exception occurs. 
 */
struct vm_operations_struct {
	void (*open)(struct vm_area_struct * area);
	void (*close)(struct vm_area_struct * area);
	int (*mremap)(struct vm_area_struct * area);
	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
						pmd_t *, unsigned int flags);
	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* notification that a previously read-only page is about to become
	 * writable; if an error is returned it will cause a SIGBUS */
	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
	int (*pfn_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);

	/* called by access_process_vm when get_user_pages() fails, typically
	 * for use by special VMAs that can switch between memory and hardware
	 */
	int (*access)(struct vm_area_struct *vma, unsigned long addr,
		      void *buf, int len, int write);

	/* Called by the /proc/PID/maps code to ask the vma whether it
	 * has a special name.  Returning non-NULL will also cause this
	 * vma to be dumped unconditionally. */
	const char *(*name)(struct vm_area_struct *vma);

#ifdef CONFIG_NUMA
	/*
	 * set_policy() op must add a reference to any non-NULL @new mempolicy
	 * to hold the policy upon return.  Caller should pass NULL @new to
	 * remove a policy and fall back to surrounding context--i.e. do not
	 * install a MPOL_DEFAULT policy, nor the task or system default
	 * mempolicy.
	 */
	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);

	/*
	 * get_policy() op must add reference [mpol_get()] to any policy at
	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
	 * in mm/mempolicy.c will do this automatically.
	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
	 * must return NULL--i.e., do not "fallback" to task or system default
	 * policy.
	 */
	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
					unsigned long addr);
#endif
	/*
	 * Called by vm_normal_page() for special PTEs to find the
	 * page for @addr.  This is useful if the default behavior
	 * (using pte_page()) would not find the correct page.
	 */
	struct page *(*find_special_page)(struct vm_area_struct *vma,
					  unsigned long addr);
};
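
/*
 * Hedged sketch of how a driver might wire the table above together with
 * the example_fault() sketched earlier (names are hypothetical):
 *
 *	static const struct vm_operations_struct example_vm_ops = {
 *		.fault = example_fault,
 *	};
 *
 * installed from the driver's ->mmap() with vma->vm_ops = &example_vm_ops.
 */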

struct mmu_gather;
struct inode;

#define page_private(page)		((page)->private)
#define set_page_private(page, v)	((page)->private = (v))

#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
static inline int pmd_devmap(pmd_t pmd)
{
	return 0;
}
#endif

/*
 * FIXME: take this include out, include page-flags.h in
 * files which need it (119 of them)
 */
#include <linux/page-flags.h>
#include <linux/huge_mm.h>

/*
 * Methods to modify the page usage count.
 *
 * What counts for a page usage:
 * - cache mapping   (page->mapping)
 * - private data    (page->private)
 * - page mapped in a task's page tables, each mapping
 *   is counted separately
 *
 * Also, many kernel routines increase the page count before a critical
 * routine so they can be sure the page doesn't go away from under them.
 */

/*
 * Drop a ref; return true if the refcount fell to zero (the page has no users)
 */
static inline int put_page_testzero(struct page *page)
{
	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
	return atomic_dec_and_test(&page->_count);
}

/*
 * Try to grab a ref unless the page has a refcount of zero; return false if
 * that is the case.
 * This can be called when MMU is off so it must not access
 * any of the virtual mappings.
 */
static inline int get_page_unless_zero(struct page *page)
{
	return atomic_inc_not_zero(&page->_count);
}
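
/*
 * Hedged sketch of the speculative-lookup pattern the helper above exists
 * for (cf. the page cache); hypothetical and heavily simplified:
 *
 *	if (!get_page_unless_zero(page))
 *		goto retry;	(the page was already being freed)
 *	... inspect the page, then drop the reference with put_page() ...
 */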

extern int page_is_ram(unsigned long pfn);

enum {
	REGION_INTERSECTS,
	REGION_DISJOINT,
	REGION_MIXED,
};

int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
		      unsigned long desc);

/* Support for virtually mapped pages */
struct page *vmalloc_to_page(const void *addr);
unsigned long vmalloc_to_pfn(const void *addr);

/*
 * Determine if an address is within the vmalloc range
 *
 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 * is no special casing required.
 */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
static inline int is_vmalloc_or_module_addr(const void *x)
{
	return 0;
}
#endif

extern void kvfree(const void *addr);
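
/*
 * Hedged sketch of the allocation pattern kvfree() pairs with: try the
 * slab allocator first, fall back to vmalloc(), and free with kvfree()
 * either way (hypothetical fragment; kmalloc/vmalloc come from other
 * headers):
 *
 *	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *	if (!buf)
 *		buf = vmalloc(size);
 *	...
 *	kvfree(buf);
 */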

static inline atomic_t *compound_mapcount_ptr(struct page *page)
{
	return &page[1].compound_mapcount;
}

static inline int compound_mapcount(struct page *page)
{
	if (!PageCompound(page))
		return 0;
	page = compound_head(page);
	return atomic_read(compound_mapcount_ptr(page)) + 1;
}

/*
 * The atomic page->_mapcount starts from -1: so that transitions
 * both from it and to it can be tracked, using atomic_inc_and_test
 * and atomic_add_negative(-1).
 */
static inline void page_mapcount_reset(struct page *page)
{
	atomic_set(&(page)->_mapcount, -1);
}

int __page_mapcount(struct page *page);

static inline int page_mapcount(struct page *page)
{
	VM_BUG_ON_PAGE(PageSlab(page), page);

	if (unlikely(PageCompound(page)))
		return __page_mapcount(page);
	return atomic_read(&page->_mapcount) + 1;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
#else
static inline int total_mapcount(struct page *page)
{
	return page_mapcount(page);
}
#endif

static inline int page_count(struct page *page)
{
	return atomic_read(&compound_head(page)->_count);
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

/*
 * Set up the page count before the page is freed into the page allocator
 * for the first time (boot or memory hotplug).
 */
static inline void init_page_count(struct page *page)
{
	atomic_set(&page->_count, 1);
}

void __put_page(struct page *page);

void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
int split_free_page(struct page *page);

/*
 * Compound pages have a destructor function.  Provide a
 * prototype for that function and accessor functions.
 * These are _only_ valid on the head of a compound page.
 */
typedef void compound_page_dtor(struct page *);

/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
enum compound_dtor_id {
	NULL_COMPOUND_DTOR,
	COMPOUND_PAGE_DTOR,
#ifdef CONFIG_HUGETLB_PAGE
	HUGETLB_PAGE_DTOR,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	TRANSHUGE_PAGE_DTOR,
#endif
	NR_COMPOUND_DTORS,
};
extern compound_page_dtor * const compound_page_dtors[];

static inline void set_compound_page_dtor(struct page *page,
		enum compound_dtor_id compound_dtor)
{
	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
	page[1].compound_dtor = compound_dtor;
}

static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
{
	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
	return compound_page_dtors[page[1].compound_dtor];
}

static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}

static inline void set_compound_order(struct page *page, unsigned int order)
{
	page[1].compound_order = order;
}

void free_compound_page(struct page *page);

#ifdef CONFIG_MMU
/*
 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 * servicing faults for write access.  In the normal case, we always want
 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 * that do not have writing enabled, when used by access_process_vm.
 */
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pte = pte_mkwrite(pte);
	return pte;
}
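
/*
 * Hedged usage sketch, simplified from the fault handlers in mm/memory.c:
 * a handler installing a freshly allocated page builds its PTE roughly as
 *
 *	entry = mk_pte(page, vma->vm_page_prot);
 *	entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 */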

void do_set_pte(struct vm_area_struct *vma, unsigned long address,
		struct page *page, pte_t *pte, bool write, bool anon);
#endif

/*
 * Multiple processes may "see" the same page. E.g. for untouched
 * mappings of /dev/null, all processes see the same page full of
 * zeroes, and text pages of executables and shared libraries have
 * only one copy in memory, at most, normally.
 *
 * For the non-reserved pages, page_count(page) denotes a reference count.
 *   page_count() == 0 means the page is free. page->lru is then used for
 *   freelist management in the buddy allocator.
 *   page_count() > 0  means the page has been allocated.
 *
 * Pages are allocated by the slab allocator in order to provide memory
 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 * unless a particular usage is carefully commented. (the responsibility of
 * freeing the kmalloc memory is the caller's, of course).
 *
 * A page may be used by anyone else who does a __get_free_page().
 * In this case, page_count still tracks the references, and should only
 * be used through the normal accessor functions. The top bits of page->flags
 * and page->virtual store page management information, but all other fields
 * are unused and could be used privately, carefully. The management of this
 * page is the responsibility of the one who allocated it, and those who have
 * subsequently been given references to it.
 *
 * The other pages (we may call them "pagecache pages") are completely
 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 * The following discussion applies only to them.
 *
Nick Piggin's avatar
Nick Piggin committed
611 612 613 614
 * A pagecache page contains an opaque `private' member, which belongs to the
 * page's address_space. Usually, this is the address of a circular list of
 * the page's disk buffers. PG_private must be set to tell the VM to call
 * into the filesystem to release these pages.
 *
 * A page may belong to an inode's memory mapping. In this case, page->mapping
 * is the pointer to the inode, and page->index is the file offset of the page,
 * in units of PAGE_CACHE_SIZE.
 *
 * If pagecache pages are not associated with an inode, they are said to be
 * anonymous pages. These may become associated with the swapcache, and in that
 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 *
 * In either case (swapcache or inode backed), the pagecache itself holds one
 * reference to the page. Setting PG_private should also increment the
 * refcount. Each user mapping also has a reference to the page.
 *
 * The pagecache pages are stored in a per-mapping radix tree, which is
 * rooted at mapping->page_tree, and indexed by offset.
 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 *
 * All pagecache pages may be subject to I/O:
 * - inode pages may need to be read from disk,
 * - inode pages which have been modified and are MAP_SHARED may need
Nick Piggin's avatar
Nick Piggin committed
636 637 638 639
 *   to be written back to the inode on disk,
 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 *   modified may need to be swapped out to swap space and (later) to be read
 *   back into memory.
 */

/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */

/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)

/*
 * Define the bit shifts to access each section.  For non-existent
 * sections we define the shift as 0; that plus a 0 mask ensures
 * the compiler will optimise away references to them.
 */
#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))

/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
#ifdef NODE_NOT_IN_PAGE_FLAGS
#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
						SECTIONS_PGOFF : ZONES_PGOFF)
#else
#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
						NODES_PGOFF : ZONES_PGOFF)
#endif

#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))

#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
#endif

#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)

static inline enum zone_type page_zonenum(const struct page *page)
{
	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
}

#ifdef CONFIG_ZONE_DEVICE
void get_zone_device_page(struct page *page);
void put_zone_device_page(struct page *page);
static inline bool is_zone_device_page(const struct page *page)
{
	return page_zonenum(page) == ZONE_DEVICE;
}
#else
static inline void get_zone_device_page(struct page *page)
{
}
static inline void put_zone_device_page(struct page *page)
{
}
static inline bool is_zone_device_page(const struct page *page)
{
	return false;
}
#endif

static inline void get_page(struct page *page)
{
	page = compound_head(page);
	/*
	 * Getting a normal page or the head of a compound page
	 * requires an already elevated page->_count.
	 */
	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
	atomic_inc(&page->_count);

	if (unlikely(is_zone_device_page(page)))
		get_zone_device_page(page);
}

static inline void put_page(struct page *page)
{
	page = compound_head(page);

	if (put_page_testzero(page))
		__put_page(page);

	if (unlikely(is_zone_device_page(page)))
		put_zone_device_page(page);
}
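
/*
 * Hedged sketch of the canonical pairing of the two helpers above;
 * hypothetical helper, for illustration only.
 */
static inline void __example_use_page(struct page *page)
{
	get_page(page);
	/* ... the page can now be used without it vanishing underneath ... */
	put_page(page);
}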

#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif

/*
 * The identification function is mainly used by the buddy allocator for
 * determining if two pages could be buddies. We are not really identifying
 * the zone, since we could be using the section number id if the node id
 * is not available in page flags.
 * We only guarantee that it will return the same value for two combinable
 * pages in a zone.
 */
static inline int page_zone_id(struct page *page)
{
	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
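
/*
 * Hedged sketch: the buddy-style check the comment above alludes to; two
 * pages may only be merged when their zone ids match. Hypothetical helper,
 * simplified from page_is_buddy() in mm/page_alloc.c.
 */
static inline bool __example_same_zone(struct page *a, struct page *b)
{
	return page_zone_id(a) == page_zone_id(b);
}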

static inline int zone_to_nid(struct zone *zone)
{
#ifdef CONFIG_NUMA
	return zone->node;
#else
	return 0;
#endif
}

#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
static inline int page_to_nid(const struct page *page)
{
	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
}
#endif

#ifdef CONFIG_NUMA_BALANCING
static inline int cpu_pid_to_cpupid(int cpu, int pid)
{
	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

static inline int cpupid_to_pid(int cpupid)
{
	return cpupid & LAST__PID_MASK;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
}

static inline int cpupid_to_nid(int cpupid)
{
	return cpu_to_node(cpupid_to_cpu(cpupid));
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
}

static inline bool cpupid_cpu_unset(int cpupid)
{
	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
}

static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
{
	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
}

#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}

static inline int page_cpupid_last(struct page *page)
{
	return page->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
{
	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}

extern int page_cpupid_xchg_last(struct page *page, int cpupid);

static inline void page_cpupid_reset_last(struct page *page)
{
	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;

	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
#else /* !CONFIG_NUMA_BALANCING */
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
	return page_to_nid(page); /* XXX */
}

static inline int page_cpupid_last(struct page *page)
{
	return page_to_nid(page); /* XXX */
}

static inline int cpupid_to_nid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_pid(int cpupid)
{
	return -1;
}

static inline int cpupid_to_cpu(int cpupid)
{
	return -1;
}

static inline int cpu_pid_to_cpupid(int nid, int pid)
{
	return -1;
}

static inline bool cpupid_pid_unset(int cpupid)
{
	return 1;
}

static inline void page_cpupid_reset_last(struct page *page)
{
}

static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
{
	return false;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline struct zone *page_zone(const struct page *page)
{
	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
}

#ifdef SECTION_IN_PAGE_FLAGS
static inline void set_page_section(struct page *page, unsigned long section)
{
	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
}

static inline unsigned long page_to_section(const struct page *page)
{
	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
}
#endif

static inline void set_page_zone(struct page *page, enum zone_type zone)
{
	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
}

static inline void set_page_node(struct page *page, unsigned long node)
{
	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
}

static inline void set_page_links(struct page *page, enum zone_type zone,
	unsigned long node, unsigned long pfn)
{
	set_page_zone(page, zone);
	set_page_node(page, node);
#ifdef SECTION_IN_PAGE_FLAGS
	set_page_section(page, pfn_to_section_nr(pfn));
#endif
}
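
/*
 * Hedged sketch: during memmap initialisation each struct page has its
 * zone/node/section stamped roughly like this (simplified from
 * memmap_init_zone() in mm/page_alloc.c; zone_idx, nid and pfn are
 * hypothetical locals):
 *
 *	set_page_links(page, zone_idx, nid, pfn);
 *	init_page_count(page);
 *	page_mapcount_reset(page);
 */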

#ifdef CONFIG_MEMCG
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return page->mem_cgroup;
}
#else
static inline struct mem_cgroup *page_memcg(struct page *page)
{
	return NULL;
}
#endif

/*
 * Some inline functions in vmstat.h depend on page_zone()
 */
#include <linux/vmstat.h>

static __always_inline void *lowmem_page_address(const struct page *page)
{
	return __va(PFN_PHYS(page_to_pfn(page)));
}

#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
#define HASHED_PAGE_VIRTUAL
#endif

#if defined(WANT_PAGE_VIRTUAL)
static inline void *page_address(const struct page *page)
{
	return page->virtual;
}
static inline void set_page_address(struct page *page, void *address)
{
	page->virtual = address;
}
#define page_address_init()  do { } while(0)
#endif

#if defined(HASHED_PAGE_VIRTUAL)
void *page_address(const struct page *page);
void set_page_address(struct page *page, void *virtual);
void page_address_init(void);
#endif

#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
#define page_address(page) lowmem_page_address(page)
#define set_page_address(page, address)  do { } while(0)
#define page_address_init()  do { } while(0)
#endif