/*
Dave Jones's avatar
Dave Jones committed
2
 * handle transition of Linux booting another kernel
3 4 5 6 7 8 9 10 11 12
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>

/*
 * Fill one PMD-level table with identity-mapped 2 MB large pages
 * covering the PUD_SIZE region beginning at addr (page-aligned down).
 */
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long cur, limit;

	cur = addr & PAGE_MASK;
	limit = cur + PUD_SIZE;
	for (; cur < limit; cur += PMD_SIZE)
		set_pmd(level2p++, __pmd(cur | __PAGE_KERNEL_LARGE_EXEC));
}

33
static int init_level3_page(struct kimage *image, pud_t *level3p,
Maneesh Soni's avatar
Maneesh Soni committed
34
				unsigned long addr, unsigned long last_addr)
35 36 37
{
	unsigned long end_addr;
	int result;
Maneesh Soni's avatar
Maneesh Soni committed
38

39 40
	result = 0;
	addr &= PAGE_MASK;
41
	end_addr = addr + PGDIR_SIZE;
Maneesh Soni's avatar
Maneesh Soni committed
42
	while ((addr < last_addr) && (addr < end_addr)) {
43
		struct page *page;
44
		pmd_t *level2p;
Maneesh Soni's avatar
Maneesh Soni committed
45

46 47 48 49 50
		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
51
		level2p = (pmd_t *)page_address(page);
52
		init_level2_page(level2p, addr);
53 54
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
55 56
	}
	/* clear the unused entries */
Maneesh Soni's avatar
Maneesh Soni committed
57
	while (addr < end_addr) {
58 59
		pud_clear(level3p++);
		addr += PUD_SIZE;
60 61 62 63 64 65
	}
out:
	return result;
}


66
static int init_level4_page(struct kimage *image, pgd_t *level4p,
Maneesh Soni's avatar
Maneesh Soni committed
67
				unsigned long addr, unsigned long last_addr)
68 69 70
{
	unsigned long end_addr;
	int result;
Maneesh Soni's avatar
Maneesh Soni committed
71

72 73
	result = 0;
	addr &= PAGE_MASK;
74
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
Maneesh Soni's avatar
Maneesh Soni committed
75
	while ((addr < last_addr) && (addr < end_addr)) {
76
		struct page *page;
77
		pud_t *level3p;
Maneesh Soni's avatar
Maneesh Soni committed
78

79 80 81 82 83
		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
84
		level3p = (pud_t *)page_address(page);
85 86 87 88
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result) {
			goto out;
		}
89 90
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
91 92
	}
	/* clear the unused entries */
Maneesh Soni's avatar
Maneesh Soni committed
93
	while (addr < end_addr) {
94 95
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
96
	}
Maneesh Soni's avatar
Maneesh Soni committed
97
out:
98 99 100
	return result;
}

/*
 * Release the page-table pages allocated by init_transition_pgtable().
 * NOTE(review): levels that were never allocated are presumably still
 * zero in image->arch, and free_page(0) is a no-op in the kernel, so a
 * partially-built table should be safe to free — confirm arch state is
 * zero-initialized at image creation.
 */
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}

/*
 * Build the page-table path that maps relocate_kernel's kernel virtual
 * address onto the physical copy of the relocation code in the second
 * half of the control page, so execution can continue at the same
 * virtual address after the identity-mapped tables are switched in.
 *
 * Any missing intermediate levels are allocated with get_zeroed_page()
 * and recorded in image->arch.* so free_transition_pgtable() can
 * release them (both on the error path here and at cleanup time).
 *
 * Returns 0 on success, -ENOMEM if a table page cannot be allocated.
 */
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	/* Physical target: the copy machine_kexec() places one page past
	 * the start of the control page. */
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	/* Frees whatever levels were recorded in image->arch so far. */
	free_transition_pgtable(image);
	return result;
}

150 151 152

static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
153
	pgd_t *level4p;
154
	int result;
155
	level4p = (pgd_t *)__va(start_pgtable);
156 157 158 159
	result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
	if (result)
		return result;
	return init_transition_pgtable(image, level4p);
160 161 162 163
}

/* Load a new interrupt descriptor table (base newidt, given limit). */
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
};


/* Load a new global descriptor table (base newgdt, given limit). */
static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
};

/*
 * Reload all data segment registers with __KERNEL_DS so their hidden
 * descriptor caches hold valid contents before the GDT is invalidated
 * by the caller (see machine_kexec()).
 */
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

int machine_kexec_prepare(struct kimage *image)
{
205
	unsigned long start_pgtable;
206 207 208
	int result;

	/* Calculate the offsets */
Maneesh Soni's avatar
Maneesh Soni committed
209
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
210 211 212

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
Maneesh Soni's avatar
Maneesh Soni committed
213
	if (result)
214 215 216 217 218 219 220
		return result;

	return 0;
}

/* Undo machine_kexec_prepare(): free the transition page-table pages. */
void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;

	tracer_disable();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Copy the relocation trampoline into the second half of the
	 * control page; the first half holds the page tables built by
	 * machine_kexec_prepare(). */
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

	/* Physical addresses handed to the trampoline. */
	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	/* The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * with from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0),0);
	set_idt(phys_to_virt(0),0);

	/* now call it */
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start);
}
/*
 * Record x86-64 specific symbols in the vmcoreinfo note so crash-dump
 * tools can locate and translate kernel data in the dump.
 */
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}