/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format in order
 * to support multiple hugepage sizes. For example, commit a4fe3ce76
 * ("powerpc/mm: Allow more flexible layouts for hugepage pagetables")
 * introduced this on powerpc, allowing a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd) (0)
#define __hugepd(x) ((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif
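
/*
 * Illustrative sketch (an assumption about the caller, not part of
 * this header's API): a generic page-table walker probes for a hugepd
 * entry before descending, along these lines:
 *
 *	if (is_hugepd(__hugepd(pgd_val(pgd))))
 *		return gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
 *				   PGDIR_SHIFT, next, write, pages, &nr);
 */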


#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes */
				/* both alloced and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to */
				/* satisfy minimum size. */
};

struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};
extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;
#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
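
/*
 * Example (illustrative only): walking every registered huge page
 * size, e.g. to dump per-hstate counters:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */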

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
				struct vm_area_struct *dst_vma,
				unsigned long dst_addr,
				unsigned long src_addr,
				struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
						struct vm_area_struct *vma,
						vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
						long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
				struct vm_area_struct *vma,
				struct address_space *mapping,
				pgoff_t idx, unsigned long address);
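
/*
 * Illustrative use (a sketch of what fault handlers do): the hash
 * selects the mutex that serializes faults on one page of a mapping:
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 */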

pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);

int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);

bool is_hugetlb_entry_migration(pte_t pte);

#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
				struct vm_area_struct *vma, unsigned long address,
				unsigned int flags)
{
	BUG();
	return 0;
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif

#define HUGETLB_ANON_FILE "anon_hugepage"

enum {
	/*
	 * The file will be used as an shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};

#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}


#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};

struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}
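
/*
 * Worked example (assuming a 2MB hstate is registered): callers such
 * as mmap(MAP_HUGETLB | MAP_HUGE_2MB) encode the page size as its
 * log2, so page_size_log == 21 maps to the 2MB hstate, while
 * page_size_log == 0 selects the default huge page size.
 */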

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}

#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
#ifndef arch_hugetlb_migration_supported
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PUD_SHIFT) ||
			(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
}
#endif
#else
static inline bool arch_hugetlb_migration_supported(struct hstate *h)
{
	return false;
}
#endif

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return arch_hugetlb_migration_supported(h);
}

/*
 * The movability check is different from the migration check.
 * It determines whether or not a huge page should be placed in
 * the movable zone. Movability of any huge page is relevant only
 * if the huge page size supports migration: there is no reason
 * for a huge page to be movable if it is not migratable to begin
 * with. The huge page size should also be large enough to be
 * placed in the movable zone while still being feasible to
 * migrate; mere presence in the movable zone does not make
 * migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones
 * are migratable, they should not be movable, because it is not
 * feasible to migrate them out of the movable zone.
 */
static inline bool hugepage_movable_supported(struct hstate *h)
{
	if (!hugepage_migration_supported(h))
		return false;

	if (hstate_is_gigantic(h))
		return false;
	return true;
}
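
/*
 * Worked example: with CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION on x86,
 * a 2MB (PMD_SHIFT) hstate is both migratable and movable, while a
 * 1GB gigantic hstate is migratable but, per the policy above, not
 * movable.
 */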

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}

#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}

#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif

#ifndef huge_ptep_modify_prot_start
#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
}
#endif

#ifndef huge_ptep_modify_prot_commit
#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
						unsigned long addr, pte_t *ptep,
						pte_t old_pte, pte_t pte)
{
	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
#endif
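
/*
 * Sketch of the start/commit protocol for changing the protection of
 * a huge pte (huge_pte_modify() is assumed to be available from the
 * arch hugetlb helpers):
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = pte_mkhuge(huge_pte_modify(old_pte, newprot));
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */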

#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_vma(h, vma, address) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT
static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline bool hugepage_movable_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */

static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
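
/*
 * Example (illustrative): huge_pte_lock() pairs with a plain
 * spin_unlock() around huge page table updates:
 *
 *	ptl = huge_pte_lock(h, mm, ptep);
 *	entry = huge_ptep_get(ptep);
 *	... examine or update the entry ...
 *	spin_unlock(ptl);
 */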

#endif /* _LINUX_HUGETLB_H */