/*
 * handle transition of Linux booting another kernel
 * Copyright (C) 2002-2005 Eric Biederman  <ebiederm@xmission.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/io.h>

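/*
 * Statically allocated, page-aligned transition page tables.  They are
 * handed to the relocate_kernel() assembly through page_list[] below so
 * that the control page can stay mapped across the page-table switch.
 */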
#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
static u64 kexec_pgd[512] PAGE_ALIGNED;
static u64 kexec_pud0[512] PAGE_ALIGNED;
static u64 kexec_pmd0[512] PAGE_ALIGNED;
static u64 kexec_pte0[512] PAGE_ALIGNED;
static u64 kexec_pud1[512] PAGE_ALIGNED;
static u64 kexec_pmd1[512] PAGE_ALIGNED;
static u64 kexec_pte1[512] PAGE_ALIGNED;

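/*
 * Fill one pmd-level table with 2MB executable large-page entries,
 * identity mapping the PUD_SIZE region that starts at addr.
 */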
static void init_level2_page(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;
	}
}

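/*
 * Populate one pud-level table, allocating pmd tables from the image's
 * control pages and identity mapping until last_addr (or the end of
 * this pud's range) is reached; leftover entries are cleared.
 */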
static int init_level3_page(struct kimage *image, pud_t *level3p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + PGDIR_SIZE;
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pmd_t *level2p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level2p = (pmd_t *)page_address(page);
		init_level2_page(level2p, addr);
		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
		addr += PUD_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pud_clear(level3p++);
		addr += PUD_SIZE;
	}
out:
	return result;
}


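/*
 * Same idea one level up: fill the top-level (pgd) table, allocating
 * pud tables from the image's control pages as needed.
 */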
static int init_level4_page(struct kimage *image, pgd_t *level4p,
				unsigned long addr, unsigned long last_addr)
{
	unsigned long end_addr;
	int result;

	result = 0;
	addr &= PAGE_MASK;
	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
	while ((addr < last_addr) && (addr < end_addr)) {
		struct page *page;
		pud_t *level3p;

		page = kimage_alloc_control_pages(image, 0);
		if (!page) {
			result = -ENOMEM;
			goto out;
		}
		level3p = (pud_t *)page_address(page);
		result = init_level3_page(image, level3p, addr, last_addr);
		if (result)
			goto out;
		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
		addr += PGDIR_SIZE;
	}
	/* clear the unused entries */
	while (addr < end_addr) {
		pgd_clear(level4p++);
		addr += PGDIR_SIZE;
	}
out:
	return result;
}


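/*
 * Build an identity-mapped page table rooted at start_pgtable that
 * covers all physical memory up to end_pfn.
 */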
static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
{
	pgd_t *level4p;
	level4p = (pgd_t *)__va(start_pgtable);
	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
}

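/*
 * Point IDTR at a new descriptor table; set_gdt() below does the same
 * for GDTR.
 */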
static void set_idt(void *newidt, u16 limit)
{
	struct desc_ptr curidt;

	/* x86-64 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	__asm__ __volatile__ (
		"lidtq %0\n"
		: : "m" (curidt)
		);
}


static void set_gdt(void *newgdt, u16 limit)
{
	struct desc_ptr curgdt;

	/* x86-64 supports unaligned loads & stores */
	curgdt.size    = limit;
	curgdt.address = (unsigned long)newgdt;

	__asm__ __volatile__ (
		"lgdtq %0\n"
		: : "m" (curgdt)
		);
}

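/*
 * Reload the data segment registers from __KERNEL_DS so their hidden
 * descriptor caches hold sane values before the GDT is invalidated;
 * see the long comment in machine_kexec() below.
 */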
static void load_segments(void)
{
	__asm__ __volatile__ (
		"\tmovl %0,%%ds\n"
		"\tmovl %0,%%es\n"
		"\tmovl %0,%%ss\n"
		"\tmovl %0,%%fs\n"
		"\tmovl %0,%%gs\n"
		: : "a" (__KERNEL_DS) : "memory"
		);
}

int machine_kexec_prepare(struct kimage *image)
{
	unsigned long start_pgtable;
	int result;

	/* Calculate the offsets */
	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;

	/* Set up the identity-mapped 64-bit page table */
	result = init_pgtable(image, start_pgtable);
	if (result)
		return result;

	return 0;
}

void machine_kexec_cleanup(struct kimage *image)
{
	return;
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
NORET_TYPE void machine_kexec(struct kimage *image)
{
	unsigned long page_list[PAGES_NR];
	void *control_page;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

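	/*
	 * The first control page holds the transition page table (see
	 * PA_TABLE_PAGE below); relocate_kernel's code is copied into
	 * the page right after it.
	 */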
	control_page = page_address(image->control_code_page) + PAGE_SIZE;
	memcpy(control_page, relocate_kernel, PAGE_SIZE);

	page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
	page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
	page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
	page_list[VA_PGD] = (unsigned long)kexec_pgd;
	page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
	page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
	page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
	page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
	page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
	page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
	page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
	page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
	page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
	page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
	page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
	page_list[VA_PTE_1] = (unsigned long)kexec_pte1;

	page_list[PA_TABLE_PAGE] =
	  (unsigned long)__pa(page_address(image->control_code_page));

	/* The segment registers are funny things, they have both a
	 * visible and an invisible part.  Whenever the visible part is
	 * set to a specific selector, the invisible part is loaded
	 * from a table in memory.  At no other time is the
	 * descriptor table in memory accessed.
	 *
	 * I take advantage of this here by force loading the
	 * segments, before I zap the gdt with an invalid value.
	 */
	load_segments();
	/* The gdt & idt are now invalid.
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);

	/* now call it */
	relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
			image->start);
}

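/*
 * Export the NUMA node_data[] symbol and its length in the vmcoreinfo
 * note, so that dump tools (e.g. makedumpfile) can decode the memory
 * layout of a crashed kernel.
 */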
void arch_crash_save_vmcoreinfo(void)
{
#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
}