/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

/*
 * Fill the PMD page with large (2M) identity-mapping entries covering
 * [addr, end).  Entries that are already present are left untouched so
 * previously established mappings are never overwritten.
 */
static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	/* Walk the range one 2M page at a time, starting on a PMD boundary. */
	for (addr &= PMD_MASK; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (!pmd_present(*pmd))
			set_pmd(pmd, __pmd((addr - info->offset) | info->page_flag));
	}
}
/*
 * Populate the PUD page for [addr, end), using 1G (GB) pages when allowed
 * and possible, otherwise allocating and filling PMD pages.
 *
 * Returns 0 on success, -ENOMEM if a PMD page could not be allocated via
 * info->alloc_pgt_page().
 */
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		bool use_gbpage;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		/* Is using a gbpage allowed? */
		use_gbpage = info->direct_gbpages;

		/*
		 * Don't use a gbpage unless it exactly covers the full,
		 * aligned GB range; a partial range must fall back to 2M
		 * pages, or we would map memory outside [addr, end).
		 */
		use_gbpage &= ((addr & ~PUD_MASK) == 0) &&
			      (next == addr + PUD_SIZE);
		/* Never overwrite existing mappings */
		use_gbpage &= !pud_present(*pud);

		if (use_gbpage) {
			pud_t pudval;

			pudval = __pud((addr - info->offset) | info->page_flag);
			set_pud(pud, pudval);
			continue;
		}

		if (pud_present(*pud)) {
			/* Reuse the PMD table this entry already points at. */
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		/* Fill the new table first, then publish it in the PUD. */
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

/*
 * Populate the P4D page for [addr, end), allocating PUD pages as needed.
 *
 * Returns 0 on success, or a negative errno (-ENOMEM) if a page-table
 * page could not be allocated.
 */
static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			/*
			 * ident_pud_init() can fail with -ENOMEM; propagate
			 * the error instead of silently ignoring it.
			 */
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

/*
 * Build an identity mapping for the physical range [pstart, pend),
 * shifted by info->offset, rooted at pgd_page.
 *
 * Returns 0 on success, or a negative errno (-ENOMEM) if a page-table
 * page could not be allocated via info->alloc_pgt_page().
 */
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;

	while (addr < end) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		unsigned long next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		p4d_t *p4d;
		int rc;

		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			/* Descend into the existing lower-level tables. */
			rc = ident_p4d_init(info, p4d_offset(pgd, 0), addr, next);
			if (rc)
				return rc;
			addr = next;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;

		/* Fill the new table first, then publish it in the PGD. */
		rc = ident_p4d_init(info, p4d, addr, next);
		if (rc)
			return rc;

		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);

			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
		}

		addr = next;
	}

	return 0;
}