/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/debugreg.h>

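/*
 * Identity-map the single 2MB page that covers @addr, allocating any
 * missing intermediate page tables from the image's control pages.
 */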
static int init_one_level2_page(struct kimage *image, pgd_t *pgd,
				unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;
	struct page *page;
	int result = -ENOMEM;

	addr &= PMD_MASK;
	pgd += pgd_index(addr);
	if (!pgd_present(*pgd)) {
		page = kimage_alloc_control_pages(image, 0);
		if (!page)
			goto out;
		pud = (pud_t *)page_address(page);
		clear_page(pud);
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, addr);
	if (!pud_present(*pud)) {
		page = kimage_alloc_control_pages(image, 0);
		if (!page)
			goto out;
		pmd = (pmd_t *)page_address(page);
		clear_page(pmd);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		set_pmd(pmd, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
	result = 0;
out:
	return result;
}

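/* Fill a PMD-level table with 2MB identity mappings (one PUD_SIZE span). */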
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}

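/*
 * Populate a PUD-level table with identity mappings from @addr up to
 * @last_addr (capped at one PGDIR_SIZE span); unused entries are cleared.
 */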
static int init_level3_page(struct kimage *image, pud_t *level3p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pud_clear(level3p++);
		addr += PUD_SIZE;
	}
out:
	return result;
}


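/*
 * Populate the top-level (PGD) table with identity mappings up to
 * @last_addr, building the lower-level tables as it goes; unused
 * entries are cleared.
 */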
static int init_level4_page(struct kimage *image, pgd_t *level4p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pud_t *level3p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result)
			goto out;
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
	}
out:
	return result;
}

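/* Free the pages backing the transition page-table levels, if any. */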
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}

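/*
 * Map the kernel virtual address of relocate_kernel to the physical
 * address of the control page, so the relocation code keeps running
 * after the page tables are switched.
 */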
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(pgd, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	free_transition_pgtable(image);
	return result;
}

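/*
 * Build the identity-mapped page table, rooted at the image's control
 * page, that the relocation code will switch to: identity-map all RAM,
 * make sure image->start is covered, and map relocate_kernel itself.
 */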
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	pgd_t *level4p;
	int result;
	level4p = (pgd_t *)__va(start_pgtable);
	result = init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
	if (result)
		return result;
	/*
	 * image->start may be outside 0 ~ max_pfn, for example when
	 * jumping back to the original kernel from the kexeced kernel.
	 */
	result = init_one_level2_page(image, level4p, image->start);
	if (result)
		return result;
	return init_transition_pgtable(image, level4p);
}

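/* Install a new interrupt descriptor table with lidt. */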
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}


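/* Install a new global descriptor table with lgdt. */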
static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}

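/*
 * Reload the data segment registers from __KERNEL_DS so their hidden
 * descriptor caches hold sane values before the GDT is torn down.
 */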
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

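/*
 * Called at image-load time: build the identity-mapped page tables the
 * relocation trampoline will run under.
 */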
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	return 0;
}

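/* Undo machine_kexec_prepare(): release the transition page-table pages. */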
void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put the APICs into legacy mode so that we
		 * can get timer interrupts in the second kernel.  The
		 * kexec/kdump paths already call disable_IO_APIC() in
		 * one form or another; the kexec jump path needs one
		 * as well.
		 */
		disable_IO_APIC();
#endif
	}

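	/*
	 * Copy the relocate_kernel trampoline into the control area
	 * and record the addresses the trampoline will need.
	 */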
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);

	/*
	 * The segment registers are funny things: they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}

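/* Record arch-specific symbols in vmcoreinfo for crash-dump analysis tools. */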
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_SYMBOL(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}