/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <linux/ftrace.h>
#include <linux/io.h>
#include <linux/suspend.h>
#include <linux/vmalloc.h>

#include <asm/init.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io_apic.h>
#include <asm/debugreg.h>
#include <asm/kexec-bzimage64.h>
#include <asm/setup.h>

#ifdef CONFIG_KEXEC_FILE
static struct kexec_file_ops *kexec_file_loaders[] = {
		&kexec_bzImage64_ops,
};
#endif

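/*
 * The "transition" page table maps the control page (which will hold a
 * copy of relocate_kernel()) at the virtual address of relocate_kernel
 * itself, so the relocation code survives the page-table switch in
 * machine_kexec().  init_transition_pgtable() fills in that single
 * mapping, allocating any missing p4d/pud/pmd/pte levels and recording
 * them in image->arch so that free_transition_pgtable() can release
 * them again.
 */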
static void free_transition_pgtable(struct kimage *image)
{
	free_page((unsigned long)image->arch.p4d);
	free_page((unsigned long)image->arch.pud);
	free_page((unsigned long)image->arch.pmd);
	free_page((unsigned long)image->arch.pte);
}

static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
{
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long vaddr, paddr;
	int result = -ENOMEM;

	vaddr = (unsigned long)relocate_kernel;
	paddr = __pa(page_address(image->control_code_page)+PAGE_SIZE);
	pgd += pgd_index(vaddr);
	if (!pgd_present(*pgd)) {
		p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
		if (!p4d)
			goto err;
		image->arch.p4d = p4d;
		set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
	}
	p4d = p4d_offset(pgd, vaddr);
	if (!p4d_present(*p4d)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
		if (!pud)
			goto err;
		image->arch.pud = pud;
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}
	pud = pud_offset(p4d, vaddr);
	if (!pud_present(*pud)) {
		pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
		if (!pmd)
			goto err;
		image->arch.pmd = pmd;
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}
	pmd = pmd_offset(pud, vaddr);
	if (!pmd_present(*pmd)) {
		pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
		if (!pte)
			goto err;
		image->arch.pte = pte;
		set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
	}
	pte = pte_offset_kernel(pmd, vaddr);
	set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
	return 0;
err:
	free_transition_pgtable(image);
	return result;
}

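/*
 * Page-table allocation callback for kernel_ident_mapping_init(): the
 * pages backing the identity map are taken from the image's control
 * pages rather than from the normal page allocator.
 */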
static void *alloc_pgt_page(void *data)
{
	struct kimage *image = (struct kimage *)data;
	struct page *page;
	void *p = NULL;

	page = kimage_alloc_control_pages(image, 0);
	if (page) {
		p = page_address(page);
		clear_page(p);
	}

	return p;
}

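/*
 * Build the identity-mapped page table that relocate_kernel() will run
 * under: cover every directly mapped pfn range and every segment of the
 * new image, then add the transition mapping for the control page.
 */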
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.context	= image,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
	};
	unsigned long mstart, mend;
	pgd_t *level4p;
	int result;
	int i;

	level4p = (pgd_t *)__va(start_pgtable);
	clear_page(level4p);
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info,
						 level4p, mstart, mend);
		if (result)
			return result;
	}

	/*
	 * The segments' memory ranges may lie outside 0 ~ max_pfn,
	 * for example when jumping back to the original kernel from a
	 * kexec'ed kernel, or when the first kernel was booted with a
	 * user-specified mem map and the second kernel is loaded outside
	 * that range.
	 */
	for (i = 0; i < image->nr_segments; i++) {
		mstart = image->segment[i].mem;
		mend   = mstart + image->segment[i].memsz;

		result = kernel_ident_mapping_init(&info,
						 level4p, mstart, mend);

		if (result)
			return result;
	}

	return init_transition_pgtable(image, level4p);
}

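/*
 * Load a caller-supplied IDT/GDT descriptor.  machine_kexec() uses
 * these below to point both tables at a zero-limit descriptor before
 * jumping into the relocation code.
 */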
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
};


static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
};

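/*
 * Force the data segment registers to __KERNEL_DS while the current
 * GDT is still valid; see the comment above the call in
 * machine_kexec().
 */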
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

#ifdef CONFIG_KEXEC_FILE
/* Update purgatory as needed after various image segments have been prepared */
static int arch_update_purgatory(struct kimage *image)
{
	int ret = 0;

	if (!image->file_mode)
		return 0;

	/* Setup copying of backup region */
	if (image->type == KEXEC_TYPE_CRASH) {
		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_dest",
				&image->arch.backup_load_addr,
				sizeof(image->arch.backup_load_addr), 0);
		if (ret)
			return ret;

		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_src",
				&image->arch.backup_src_start,
				sizeof(image->arch.backup_src_start), 0);
		if (ret)
			return ret;

		ret = kexec_purgatory_get_set_symbol(image,
				"purgatory_backup_sz",
				&image->arch.backup_src_sz,
				sizeof(image->arch.backup_src_sz), 0);
		if (ret)
			return ret;
	}

	return ret;
}
#else /* !CONFIG_KEXEC_FILE */
static inline int arch_update_purgatory(struct kimage *image)
{
	return 0;
}
#endif /* CONFIG_KEXEC_FILE */

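/*
 * Arch-specific preparation at image load time.  Unlike machine_kexec()
 * below, failing here is still allowed: build the identity-mapped page
 * table in the control pages and let the file-based loader patch its
 * purgatory symbols.
 */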
int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Setup the identity mapped 64bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	/* update purgatory as needed */
	result = arch_update_purgatory(image);
	if (result)
		return result;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	free_transition_pgtable(image);
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;
	int save_ftrace_enabled;

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		save_processor_state();
#endif

	save_ftrace_enabled = __ftrace_enabled_save();

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();
	hw_breakpoint_disable();

	if (image->preserve_context) {
#ifdef CONFIG_X86_IO_APIC
		/*
		 * We need to put APICs in legacy mode so that we can
		 * get timer interrupts in the second kernel.  The
		 * kexec/kdump paths already call disable_IO_APIC() in
		 * one form or another; the kexec jump path needs to do
		 * the same.
		 */
		disable_IO_APIC();
#endif
	}

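	/*
	 * relocate_kernel() will run from a copy in the control page once
	 * the old mappings are gone; page_list hands it the physical and
	 * virtual addresses it needs to perform the switch.
	 */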
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)control_page;
	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	if (image->type == KEXEC_TYPE_DEFAULT)
		page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
						<< PAGE_SHIFT);

	/*
	 * The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/*
	 * The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	image->start = relocate_kernel((unsigned long)image->head,
				       (unsigned long)page_list,
				       image->start,
				       image->preserve_context);

#ifdef CONFIG_KEXEC_JUMP
	if (image->preserve_context)
		restore_processor_state();
#endif

	__ftrace_enabled_restore(save_ftrace_enabled);
}

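/*
 * Record the x86-64 specific values (physical load base, initial page
 * table, NUMA node data, KASLR offset, kernel image size) that dump
 * analysis tools need in order to interpret a vmcore.
 */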
void arch_crash_save_vmcoreinfo(void)
{
	VMCOREINFO_NUMBER(phys_base);
	VMCOREINFO_SYMBOL(init_level4_pgt);

#ifdef CONFIG_NUMA
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
			      kaslr_offset());
	VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
}

/* arch-dependent functionality related to kexec file-based syscall */

#ifdef CONFIG_KEXEC_FILE
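/*
 * Walk kexec_file_loaders[] and pick the first loader whose probe()
 * accepts the supplied kernel buffer; the chosen ops are then used by
 * the load/cleanup/verify hooks below.
 */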
int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	int i, ret = -ENOEXEC;
	struct kexec_file_ops *fops;

	for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
		fops = kexec_file_loaders[i];
		if (!fops || !fops->probe)
			continue;

		ret = fops->probe(buf, buf_len);
		if (!ret) {
			image->fops = fops;
			return ret;
		}
	}

	return ret;
}

void *arch_kexec_kernel_image_load(struct kimage *image)
{
	vfree(image->arch.elf_headers);
	image->arch.elf_headers = NULL;

	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image->image_loader_data);
}

#ifdef CONFIG_KEXEC_VERIFY_SIG
int arch_kexec_kernel_verify_sig(struct kimage *image, void *kernel,
				 unsigned long kernel_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(kernel, kernel_len);
}
#endif

/*
 * Apply purgatory relocations.
 *
 * ehdr: Pointer to elf headers
 * sechdrs: Pointer to section headers.
 * relsec: section index of SHT_RELA section.
 *
 * TODO: Some of this code belongs in generic code. Move it to kexec.c.
 */
int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr,
				     Elf64_Shdr *sechdrs, unsigned int relsec)
{
	unsigned int i;
	Elf64_Rela *rel;
	Elf64_Sym *sym;
	void *location;
	Elf64_Shdr *section, *symtabsec;
	unsigned long address, sec_base, value;
	const char *strtab, *name, *shstrtab;

	/*
	 * ->sh_offset has been modified to keep the pointer to section
	 * contents in memory
	 */
	rel = (void *)sechdrs[relsec].sh_offset;

	/* Section to which relocations apply */
	section = &sechdrs[sechdrs[relsec].sh_info];

	pr_debug("Applying relocate section %u to %u\n", relsec,
		 sechdrs[relsec].sh_info);

	/* Associated symbol table */
	symtabsec = &sechdrs[sechdrs[relsec].sh_link];

	/* String table */
	if (symtabsec->sh_link >= ehdr->e_shnum) {
		/* Invalid strtab section number */
		pr_err("Invalid string table section index %d\n",
		       symtabsec->sh_link);
		return -ENOEXEC;
	}

	strtab = (char *)sechdrs[symtabsec->sh_link].sh_offset;

	/* section header string table */
	shstrtab = (char *)sechdrs[ehdr->e_shstrndx].sh_offset;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {

		/*
		 * rel[i].r_offset contains byte offset from beginning
		 * of section to the storage unit affected.
		 *
		 * This is the location to update (->sh_offset), i.e. the
		 * temporary buffer where the section is currently loaded.
		 * The section will finally be loaded to a different address,
		 * pointed to by ->sh_addr; kexec takes care of moving it
		 * (kexec_load_segment()).
		 */
		location = (void *)(section->sh_offset + rel[i].r_offset);

		/* Final address of the location */
		address = section->sh_addr + rel[i].r_offset;

		/*
		 * rel[i].r_info contains information about symbol table index
		 * w.r.t which relocation must be made and type of relocation
		 * to apply. ELF64_R_SYM() and ELF64_R_TYPE() macros get
		 * these respectively.
		 */
		sym = (Elf64_Sym *)symtabsec->sh_offset +
				ELF64_R_SYM(rel[i].r_info);

		if (sym->st_name)
			name = strtab + sym->st_name;
		else
			name = shstrtab + sechdrs[sym->st_shndx].sh_name;

		pr_debug("Symbol: %s info: %02x shndx: %02x value=%llx size: %llx\n",
			 name, sym->st_info, sym->st_shndx, sym->st_value,
			 sym->st_size);

		if (sym->st_shndx == SHN_UNDEF) {
			pr_err("Undefined symbol: %s\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_COMMON) {
			pr_err("symbol '%s' in common section\n", name);
			return -ENOEXEC;
		}

		if (sym->st_shndx == SHN_ABS)
			sec_base = 0;
		else if (sym->st_shndx >= ehdr->e_shnum) {
			pr_err("Invalid section %d for symbol %s\n",
			       sym->st_shndx, name);
			return -ENOEXEC;
		} else
			sec_base = sechdrs[sym->st_shndx].sh_addr;

		value = sym->st_value;
		value += sec_base;
		value += rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*(u64 *)location = value;
			break;
		case R_X86_64_32:
			*(u32 *)location = value;
			if (value != *(u32 *)location)
				goto overflow;
			break;
		case R_X86_64_32S:
			*(s32 *)location = value;
			if ((s64)value != *(s32 *)location)
				goto overflow;
			break;
		case R_X86_64_PC32:
			value -= (u64)address;
			*(u32 *)location = value;
			break;
		default:
			pr_err("Unknown rela relocation: %llu\n",
			       ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	pr_err("Overflow in relocation type %d value 0x%lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), value);
	return -ENOEXEC;
}
#endif /* CONFIG_KEXEC_FILE */

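/*
 * arch_kexec_protect_crashkres() and arch_kexec_unprotect_crashkres()
 * flip the crash kernel ranges between read-only and read-write, while
 * skipping the control code page that crash_kexec() itself still needs
 * to write.
 */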
static int
kexec_mark_range(unsigned long start, unsigned long end, bool protect)
{
	struct page *page;
	unsigned int nr_pages;

	/*
	 * For physical range: [start, end]. We must skip the unassigned
	 * crashk resource with zero-valued "end" member.
	 */
	if (!end || start > end)
		return 0;

	page = pfn_to_page(start >> PAGE_SHIFT);
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	if (protect)
		return set_pages_ro(page, nr_pages);
	else
		return set_pages_rw(page, nr_pages);
}

static void kexec_mark_crashkres(bool protect)
{
	unsigned long control;

	kexec_mark_range(crashk_low_res.start, crashk_low_res.end, protect);

	/* Don't touch the control code page used in crash_kexec().*/
	control = PFN_PHYS(page_to_pfn(kexec_crash_image->control_code_page));
	/* Control code page is located in the 2nd page. */
	kexec_mark_range(crashk_res.start, control + PAGE_SIZE - 1, protect);
	control += KEXEC_CONTROL_PAGE_SIZE;
	kexec_mark_range(control, crashk_res.end, protect);
}

void arch_kexec_protect_crashkres(void)
{
	kexec_mark_crashkres(true);
}

void arch_kexec_unprotect_crashkres(void)
{
	kexec_mark_crashkres(false);
}