#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

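/*
 * A subpool tracks huge page usage for one hugetlbfs mount: max_hpages caps
 * how many pages the mount may consume, and min_hpages reserves pages from
 * the global pool so a minimum mount size can always be satisfied.
 */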
struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

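/*
 * A reservation map records which file regions of a hugetlb mapping already
 * carry huge page reservations.  It is shared via @refs, protected by @lock,
 * and keeps a cache of pre-allocated region entries for in-progress updates.
 */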
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
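/*
 * Example (sketch only): walk every registered huge page size.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */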

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
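/*
 * hugetlb_reserve_pages() reserves huge pages for the file range [from, to);
 * hugetlb_unreserve_pages() drops reservations when part of that range is
 * freed, e.g. on truncate.
 */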
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
int dequeue_hwpoisoned_huge_page(struct page *page);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
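/*
 * Faults on the same (mapping, index) pair are serialized by a hashed table
 * of mutexes; hugetlb_fault_mutex_hash() picks the slot to take before a
 * hugetlb fault is handled.
 */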
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);

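/*
 * PMD sharing (where the architecture supports it): tasks mapping the same
 * suitably aligned range of a hugetlbfs file can share the page table page
 * holding its PMDs; huge_pmd_share() finds or installs that shared table.
 */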
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int hugepages_treat_as_movable;
extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address)	0
static inline int dequeue_hwpoisoned_huge_page(struct page *page)
{
	return 0;
}

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an architecture supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#ifndef pud_write
static inline int pud_write(pud_t pud)
{
	BUG();
	return 0;
}
#endif

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format in order to
 * support multiple hugepage sizes. For example,
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced this on powerpc, allowing for a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file, so shmfs accounting rules
	 * apply.
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount, and shmfs
	 * accounting rules do not apply.
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}


#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
#ifdef CONFIG_HIGHMEM
	phys_addr_t phys;
#endif
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

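/*
 * A "gigantic" hstate is one whose order is at least MAX_ORDER, i.e. its
 * pages are too large to come from the buddy allocator.
 */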
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

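/* Number of 512-byte sectors spanned by one huge page. */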
static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);
static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
#else
	return false;
#endif
}

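/*
 * Page table lock selection for huge PTEs: PMD-sized pages use the split
 * PMD lock, anything larger falls back to mm->page_table_lock.
 */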
static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_noerr(v, a, r) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}
#define hstate_index_to_shift(index) 0
#define hstate_index(h) 0

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}
#define dissolve_free_huge_pages(s, e)	0
#define hugepage_migration_supported(h)	false

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

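/*
 * huge_pte_lock() takes the lock returned by huge_pte_lockptr() and hands it
 * back to the caller, who must drop it, e.g. (sketch):
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	... examine or update the huge PTE ...
 *	spin_unlock(ptl);
 */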
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}

#endif /* _LINUX_HUGETLB_H */