/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/kmemleak.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>
#include <asm/pte-walk.h>

bool hugetlb_disabled = false;

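/*
 * A hugepd ("huge page directory") is a pgd/pud/pmd entry that points
 * to a directory of huge PTEs rather than to a normal lower-level
 * table.  An all-zero entry is an empty slot.
 */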
#define hugepd_none(hpd)	(hpd_val(hpd) == 0)

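/*
 * For powers of two, __builtin_ffs(x) is log2(x) + 1, so this is
 * log2(sizeof(pte_t) / sizeof(void *)).  PGT_CACHE(shift) serves
 * objects of size sizeof(void *) << shift, so PGT_CACHE(PTE_T_ORDER)
 * serves objects just big enough to hold a single pte_t.
 */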
#define PTE_T_ORDER	(__builtin_ffs(sizeof(pte_t)) - __builtin_ffs(sizeof(void *)))

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	/*
	 * Only called for hugetlbfs pages, hence can ignore THP and the
	 * irq disabled walk.
	 */
	return __find_linux_pte(mm->pgd, addr, NULL, NULL);
}

static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned int pdshift,
			   unsigned int pshift, spinlock_t *ptl)
{
	struct kmem_cache *cachep;
	pte_t *new;
	int i;
	int num_hugepd;

	if (pshift >= pdshift) {
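		/*
		 * The huge page covers at least the range mapped by one
		 * pdshift-level entry, so several consecutive entries must
		 * point into the same hugepd (filled in by the loop below).
		 */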
		cachep = PGT_CACHE(PTE_T_ORDER);
		num_hugepd = 1 << (pshift - pdshift);
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		cachep = PGT_CACHE(PTE_INDEX_SIZE);
		num_hugepd = 1;
	} else {
		cachep = PGT_CACHE(pdshift - pshift);
		num_hugepd = 1;
	}

	new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	/*
	 * Make sure other cpus find the hugepd set only after a
	 * properly initialized page table is visible to them.
	 * For more details look for comment in __pte_alloc().
	 */
	smp_wmb();

	spin_lock(ptl);
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		hugepd_populate(hpdp, new, pshift);
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			*hpdp = __hugepd(0);
		kmem_cache_free(cachep, new);
	} else {
		kmemleak_ignore(new);
	}
	spin_unlock(ptl);
	return 0;
}

/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;
	spinlock_t *ptl;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

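	/*
	 * Pick the page table level whose entries will carry the mapping:
	 * a leaf PTE directly at pgd/pud/pmd level when the page size
	 * matches that level exactly, otherwise a hugepd hanging off it.
	 */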
#ifdef CONFIG_PPC_BOOK3S_64
	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT) {
		/*
		 * We need to use hugepd table
		 */
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else {
				ptl = pmd_lockptr(mm, pm);
				hpdp = (hugepd_t *)pm;
			}
		}
	}
#else
	if (pshift >= PGDIR_SHIFT) {
		ptl = &mm->page_table_lock;
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= PUD_SHIFT) {
			ptl = pud_lockptr(mm, pu);
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			ptl = pmd_lockptr(mm, pm);
			hpdp = (hugepd_t *)pm;
		}
	}
#endif
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr,
						  pdshift, pshift, ptl))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready on pseries.
 */
#define MAX_NUMBER_GPAGES	1024
__initdata static u64 gpage_freearray[MAX_NUMBER_GPAGES];
__initdata static unsigned nr_gpages;

/*
 * Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is set up.
 */
void __init pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;

	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif

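/*
 * On pseries LPARs using the hash MMU, gigantic pages must come from
 * the pool reserved at boot (see pseries_add_gpage() above); in all
 * other cases the generic bootmem allocator can satisfy the request.
 */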
int __init alloc_bootmem_huge_page(struct hstate *h)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (firmware_has_feature(FW_FEATURE_LPAR) && !radix_enabled())
		return pseries_alloc_bootmem_huge_page(h);
#endif
	return __alloc_bootmem_huge_page(h);
}

#ifndef CONFIG_PPC_BOOK3S_64
#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), batch->ptes[i]);

	free_page((unsigned long)batch);
}

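/*
 * A hugepd page may still be visible to a concurrent lockless walker
 * (e.g. the fast GUP path), so it cannot be reused right away: batch
 * it per cpu and free the whole batch after an RCU grace period.  A
 * thread-local mm has no such walkers and can free synchronously.
 */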
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    mm_is_thread_local(tlb->mm)) {
		kmem_cache_free(PGT_CACHE(PTE_T_ORDER), hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#else
static inline void hugepd_free(struct mmu_gather *tlb, void *hugepte) {}
#endif

static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;
	unsigned int shift = hugepd_shift(*hpdp);

	/* Note: On fsl the hpdp may be the first of several */
	if (shift > pdshift)
		num_hugepd = 1 << (shift - pdshift);

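	/*
	 * As in the generic free_pmd_range() etc., only tear the hugepd
	 * down if the range being unmapped covers everything it maps and
	 * stays within the floor/ceiling bounds.
	 */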
	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		*hpdp = __hugepd(0);

	if (shift >= pdshift)
		hugepd_free(tlb, hugepte);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(PTE_INDEX_SIZE));
	else
		pgtable_free_tlb(tlb, hugepte,
				 get_hugepd_cache_index(pdshift - shift));
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		unsigned long more;

		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * If it is not a hugepd pointer, we should already
			 * find it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		more = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
		if (more > next)
			next = more;

		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't here use the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */

	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
			unsigned long more;
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			more = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
			if (more > next)
				next = more;

			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}

struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift)
{
	pte_t *ptep;
	spinlock_t *ptl;
	struct page *page = NULL;
	unsigned long mask;
	int shift = hugepd_shift(hpd);
	struct mm_struct *mm = vma->vm_mm;

retry:
	/*
	 * Hugepage directory entries are protected by mm->page_table_lock,
	 * so use it instead of huge_pte_lockptr().
	 */
	ptl = &mm->page_table_lock;
	spin_lock(ptl);

	ptep = hugepte_offset(hpd, address, pdshift);
	if (pte_present(*ptep)) {
		mask = (1UL << shift) - 1;
		page = pte_page(*ptep);
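		/* Step from the head page to the subpage covering address. */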
		page += ((address & mask) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
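		/*
		 * Not present: this may be a migration entry while the
		 * huge page is being moved; wait for the migration to
		 * finish and then retry the lookup.
		 */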
		if (is_hugetlb_entry_migration(*ptep)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, ptep, ptl);
			goto retry;
		}
	}
	spin_unlock(ptl);
	return page;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

#ifdef CONFIG_PPC_RADIX_MMU
	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
						       pgoff, flags);
#endif
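	/* Hash MMU: let the slice allocator place the mapping (1 = topdown). */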
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	/* With radix we don't use slices, so derive the page size from the vma */
	if (IS_ENABLED(CONFIG_PPC_MM_SLICES) && !radix_enabled()) {
		unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

		return 1UL << mmu_psize_to_shift(psize);
	}
	return vma_kernel_pagesize(vma);
}

static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/*
	 * Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits.
	 */
	if (size <= PAGE_SIZE || !is_power_of_2(size))
		return -EINVAL;

	mmu_psize = check_and_get_huge_psize(shift);
	if (mmu_psize < 0)
		return -EINVAL;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0) {
		hugetlb_bad_size();
		pr_err("Invalid huge page size specified (%llu)\n", size);
	}

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);

static int __init hugetlbpage_init(void)
{
	int psize;

	if (hugetlb_disabled) {
		pr_info("HugeTLB support is disabled!\n");
		return 0;
	}

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled() &&
	    !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

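		/*
		 * Work out which directory level hosts entries for this
		 * page size, so that the matching pgtable cache can be
		 * set up below.
		 */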
#ifdef CONFIG_PPC_BOOK3S_64
		if (shift > PGDIR_SHIFT)
			continue;
		else if (shift > PUD_SHIFT)
			pdshift = PGDIR_SHIFT;
		else if (shift > PMD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PMD_SHIFT;
#else
		if (shift < PUD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PGDIR_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
#endif

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;
		/*
		 * If pdshift and shift are the same, we don't use the
		 * pgtable cache for the hugepd.
		 */
		if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
			pgtable_cache_add(PTE_INDEX_SIZE);
		else if (pdshift > shift)
			pgtable_cache_add(pdshift - shift);
		else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
			pgtable_cache_add(PTE_T_ORDER);
	}

	if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
		hugetlbpage_init_default();

	return 0;
}

arch_initcall(hugetlbpage_init);

void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

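	/* Flush one subpage at a time; highmem subpages must be kmapped. */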
	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}