/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/*
 * Fill one PMD-level table with identity-mapped 2 MiB large pages
 * covering the PUD_SIZE-sized region that contains @addr.
 *
 * @level2p: first entry of the PMD table to populate
 * @addr:    start of the region; rounded down to PAGE_MASK first
 */
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long cur, region_end;

	cur = addr & PAGE_MASK;
	region_end = cur + PUD_SIZE;

	/* One executable large-page entry per PMD_SIZE step. */
	for (; cur < region_end; cur += PMD_SIZE)
		set_pmd(level2p++, __pmd(cur | __PAGE_KERNEL_LARGE_EXEC));
}

/*
 * Populate one PUD-level table with identity mappings for the
 * PGDIR_SIZE-sized region at @addr, stopping early once @last_addr
 * is reached.  Each populated entry points at a freshly allocated
 * PMD table built by init_level2_page(); any entries past
 * @last_addr are cleared.
 *
 * Returns 0 on success, -ENOMEM if a control page cannot be
 * allocated (remaining entries are then left untouched, exactly as
 * the caller's error path expects).
 */
static int init_level3_page(struct kimage *image, pud_t *level3p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;

	/* Build and install a PMD table for every PUD_SIZE chunk in range. */
	for (; (addr < last_addr) && (addr < end_addr); addr += PUD_SIZE) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page)
			return -ENOMEM;
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
	}

	/* Clear whatever part of the table we did not populate. */
	for (; addr < end_addr; addr += PUD_SIZE)
		pud_clear(level3p++);

	return 0;
}


/*
 * Populate the top-level (PGD) page table with identity mappings
 * from @addr up to @last_addr.  Each in-range entry gets a freshly
 * allocated PUD table filled by init_level3_page(); entries beyond
 * @last_addr are cleared.
 *
 * Returns 0 on success or a negative errno (-ENOMEM, or whatever
 * init_level3_page() reported) on failure.
 */
static int init_level4_page(struct kimage *image, pgd_t *level4p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);

	/* Build and install a PUD table for every PGDIR_SIZE chunk in range. */
	for (; (addr < last_addr) && (addr < end_addr); addr += PGDIR_SIZE) {
		struct page *page;
		pud_t *level3p;
		int result;

		page = kimage_alloc_control_pages(image, 0);
		if (!page)
			return -ENOMEM;
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result)
			return result;
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
	}

	/* Clear whatever part of the table we did not populate. */
	for (; addr < end_addr; addr += PGDIR_SIZE)
		pgd_clear(level4p++);

	return 0;
}

/*
 * Release the intermediate page-table pages recorded by
 * init_transition_pgtable() in image->arch.
 *
 * NOTE(review): fields that were never allocated are presumably
 * zero-initialized, making the corresponding free_page() a no-op —
 * confirm image->arch is zeroed at image creation.
 */
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}

/*
 * Map the virtual address of relocate_kernel() to the copy of its
 * code placed in the second page of the control area, inside the
 * page-table hierarchy rooted at @pgd.  This lets execution continue
 * at the same virtual address across the page-table switch.
 *
 * Intermediate tables allocated here are recorded in image->arch so
 * free_transition_pgtable() can release them on error or cleanup.
 *
 * Returns 0 on success, -ENOMEM if any table allocation fails.
 */
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	/* Virtual address to map, and the physical address it must reach. */
	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	/* Allocate each missing level on the walk down to the PTE. */
	if (!pgd_present(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	/* Finally install the executable mapping itself. */
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	/* Frees only what was recorded in image->arch; partial walks are OK. */
	free_transition_pgtable(image);
	return result;
}


static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
152
	pgd_t *level4p;
153
	int result;
154
	level4p = (pgd_t *)__va(start_pgtable);
155 156 157 158
	result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
	if (result)
		return result;
	return init_transition_pgtable(image, level4p);
159 160 161 162
}

/*
 * Load a new interrupt descriptor table.
 *
 * @newidt: linear address of the IDT
 * @limit:  IDT limit (size in bytes minus one)
 *
 * Fixes vs. original: removed the stray ';' after the function body
 * (invalid at file scope under -pedantic) and the "unaliged" typo.
 */
static void set_idt(void *newidt, u16 limit)
{
	/* x86-64 supports unaligned loads & stores, so a local
	 * struct desc_ptr can be handed to lidt directly. */
	struct desc_ptr curidt = {
		.size    = limit,
		.address = (unsigned long)newidt,
	};

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}


/*
 * Load a new global descriptor table.
 *
 * @newgdt: linear address of the GDT
 * @limit:  GDT limit (size in bytes minus one)
 *
 * Fix vs. original: removed the stray ';' after the function body
 * (invalid at file scope under -pedantic).
 */
static void set_gdt(void *newgdt, u16 limit)
{
	/* x86-64 supports unaligned loads & stores, so a local
	 * struct desc_ptr can be handed to lgdt directly. */
	struct desc_ptr curgdt = {
		.size    = limit,
		.address = (unsigned long)newgdt,
	};

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}

static void load_segments(void)
{
	__asm__ __volatile__ (
193 194 195 196 197
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
Michael Matz's avatar
Michael Matz committed
198
		: : "a" (__KERNEL_DS) : "memory"
199 200 201 202 203
		);
}

int machine_kexec_prepare(struct kimage *image)
{
204
	unsigned long start_pgtable;
205 206 207
	int result;

	/* Calculate the offsets */
Maneesh Soni's avatar
Maneesh Soni committed
208
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
209 210 211

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
Maneesh Soni's avatar
Maneesh Soni committed
212
	if (result)
213 214 215 216 217 218 219
		return result;

	return 0;
}

/* Undo machine_kexec_prepare(): release the transition page tables. */
void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
228
{
229 230
	unsigned long page_list[PAGES_NR];
	void *control_page;
231

Ingo Molnar's avatar
Ingo Molnar committed
232 233
	tracer_disable();

234 235 236
	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

237 238 239
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

240
	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
241 242
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));
243

244 245
	/*
	 * The segment registers are funny things, they have both a
246 247 248 249
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * with from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
250 251 252 253 254
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
255 256
	/*
	 * The gdt & idt are now invalid.
257 258
	 * If you want to load them you must set up your own idt & gdt.
	 */
259 260
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);
261

262
	/* now call it */
263 264
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start);
265
}
void arch_crash_save_vmcoreinfo(void)
{
269
	VMCOREINFO_SYMBOL(phys_base);
270
	VMCOREINFO_SYMBOL(init_level4_pgt);
271 272 273 274 275

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
Ken'ichi Ohmichi's avatar
Ken'ichi Ohmichi committed
276 277
}