/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/debugreg.h>

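/*
 * Identity-map one PMD-sized (2MB) region at @addr into the page table
 * rooted at @pgd, allocating any missing PUD/PMD pages from the image's
 * control pages.
 */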
static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
				unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;
	struct page *page;
	int result = -ENOMEM;

	addr &= PMD_MASK;
	pgd += pgd_index(addr);
	if (!pgd_present(*pgd)) {
		page = kimage_alloc_control_pages(image, 0);
		if (!page)
			goto out;
		pud = (pud_t *)page_address(page);
		clear_page(pud);
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud)) {
		page = kimage_alloc_control_pages(image, 0);
		if (!page)
			goto out;
		pmd = (pmd_t *)page_address(page);
		clear_page(pmd);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
	result = 0;
out:
	return result;
}

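/*
 * Identity-map the physical range [mstart, mend), one 2MB page at a time.
 */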
static int ident_mapping_init(struct kimage *image, pgd_t *level4p,
				unsigned long mstart, unsigned long mend)
{
	int result;

	mstart = round_down(mstart, PMD_SIZE);
	mend   = round_up(mend - 1, PMD_SIZE);

	while (mstart < mend) {
		result = init_one_level2_page(image, level4p, mstart);
		if (result)
			return result;

		mstart += PMD_SIZE;
	}

	return 0;
}

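/*
 * Fill one PMD page with 2MB identity mappings covering PUD_SIZE of
 * address space starting at @addr.
 */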
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}

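/*
 * Fill one PUD page: allocate and populate a PMD page for each entry up
 * to @last_addr, then clear the remaining entries.
 */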
static int init_level3_page(struct kimage *image, pud_t *level3p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pud_clear(level3p++);
		addr += PUD_SIZE;
	}
out:
	return result;
}


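/*
 * Populate the top-level (PGD) page of the identity map up to @last_addr,
 * allocating a PUD page per entry and clearing the unused entries.
 */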
static int init_level4_page(struct kimage *image, pgd_t *level4p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pud_t *level3p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result)
			goto out;
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
	}
out:
	return result;
}

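/* Free the page table pages allocated by init_transition_pgtable(). */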
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}

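/*
 * Map the virtual address of relocate_kernel() to the physical control
 * page, so execution can continue at the same virtual address once the
 * identity-mapped page table is switched in.
 */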
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	free_transition_pgtable(image);
	return result;
}

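/*
 * Build the identity-mapped page table used during the transition: all
 * memory below max_pfn, every target segment, and the transition mapping
 * for relocate_kernel().
 */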
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	unsigned long mstart, mend;
	pgd_t *level4p;
	int result;
	int i;

	level4p = (pgd_t *)__va(start_pgtable);
	result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
	if (result)
		return result;

	/*
	 * segments' mem ranges could be outside 0 ~ max_pfn,
	 * for example when jumping back to the original kernel from a
	 * kexeced kernel, or when the first kernel is booted with a user
	 * mem map and the second kernel is loaded outside that range.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;

		result = ident_mapping_init(image, level4p, mstart, mend);

		if (result)
			return result;
	}

	return init_transition_pgtable(image, level4p);
}

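/* Point the IDTR at @newidt with the given @limit. */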
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
};


static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
};

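/*
 * Force-reload the data segment registers from the current GDT so that
 * their hidden descriptor caches remain valid after the GDT is zapped.
 */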
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

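/*
 * Build the identity-mapped page tables that relocate_kernel() will run on.
 */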
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Set up the identity-mapped 64-bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel. The kexec/kdump
		 * paths already have calls to disable_IO_APIC() in one form
		 * or another; the kexec jump path needs one as well.
		 */
		disable_IO_APIC();
#endif
	}

	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);

	/*
	 * The segment registers are funny things; they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}

void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}