/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>

#include <crypto/hash.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* Defined in hibernate_asm_64.S */
extern asmlinkage __visible int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;

unsigned long temp_level4_pgt __visible;

unsigned long relocated_restore_code __visible;

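/*
 * Build the page-table branch (pgd -> [p4d ->] pud -> pmd) that maps the
 * large page containing the image kernel's entry point.  Every table page
 * comes from get_safe_page(), so the branch cannot collide with image data
 * while the image is being copied back into place.
 */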
static int set_up_temporary_text_mapping(pgd_t *pgd)
{
	pmd_t *pmd;
	pud_t *pud;
	p4d_t *p4d;

	/*
	 * The new mapping only has to cover the page containing the image
	 * kernel's entry point (jump_address_phys), because the switch over to
	 * it is carried out by relocated code running from a page allocated
	 * specifically for this purpose and covered by the identity mapping, so
	 * the temporary kernel text mapping is only needed for the final jump.
	 * Moreover, in that mapping the virtual address of the image kernel's
	 * entry point must be the same as its virtual address in the image
	 * kernel (restore_jump_address), so the image kernel's
	 * restore_registers() code doesn't find itself in a different area of
	 * the virtual address space after switching over to the original page
	 * tables used by the image kernel.
	 */

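	/* With 5-level paging an extra p4d table sits between the pgd and the pud. */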
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = (p4d_t *)get_safe_page(GFP_ATOMIC);
		if (!p4d)
			return -ENOMEM;
	}

	pud = (pud_t *)get_safe_page(GFP_ATOMIC);
	if (!pud)
		return -ENOMEM;

	pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
	if (!pmd)
		return -ENOMEM;

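	/*
	 * Map the 2M large page containing jump_address_phys at the virtual
	 * address the image kernel expects, then link the new tables upward.
	 */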
	set_pmd(pmd + pmd_index(restore_jump_address),
		__pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
	set_pud(pud + pud_index(restore_jump_address),
		__pud(__pa(pmd) | _KERNPG_TABLE));
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE));
		set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE));
	} else {
		/* No p4d for 4-level paging: point the pgd to the pud page table */
		set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

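/* Page-table allocation callback passed to kernel_ident_mapping_init(). */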
static void *alloc_pgt_page(void *context)
{
	return (void *)get_safe_page(GFP_ATOMIC);
}

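/*
 * Create the temporary page tables used during resume: a text mapping for
 * the final jump into the image kernel plus an identity mapping of every
 * RAM range recorded in pfn_mapped[].
 */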
static int set_up_temporary_mappings(void)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page,
		.page_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,
	};
	unsigned long mstart, mend;
	pgd_t *pgd;
	int result;
	int i;

	pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!pgd)
		return -ENOMEM;

	/* Prepare a temporary mapping for the kernel text */
	result = set_up_temporary_text_mapping(pgd);
	if (result)
		return result;

	/* Set up the direct mapping from scratch */
	for (i = 0; i < nr_pfn_mapped; i++) {
		mstart = pfn_mapped[i].start << PAGE_SHIFT;
		mend   = pfn_mapped[i].end << PAGE_SHIFT;

		result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
		if (result)
			return result;
	}

	temp_level4_pgt = __pa(pgd);
	return 0;
}

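/*
 * Copy the asm trampoline (core_restore_code) to a safe page that is part
 * of the identity mapping, and clear _PAGE_NX on it so it can be executed
 * while the image pages are being restored.
 */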
static int relocate_restore_code(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
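	/* Clear _PAGE_NX at whichever level currently maps the page. */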
	p4d = p4d_offset(pgd, relocated_restore_code);
	if (p4d_large(*p4d)) {
		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
		goto out;
	}
	pud = pud_offset(p4d, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
		goto out;
	}
	pmd = pmd_offset(pud, relocated_restore_code);
	if (pmd_large(*pmd)) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		goto out;
	}
	pte = pte_offset_kernel(pmd, relocated_restore_code);
	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
	__flush_tlb_all();
	return 0;
}

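/*
 * Architecture entry point for resume: build the temporary page tables,
 * relocate the trampoline and hand control to the asm restore path, which
 * does not return here on success.
 */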
int swsusp_arch_resume(void)
{
	int error;

	/* We have enough memory and from now on we cannot recover. */
	error = set_up_temporary_mappings();
	if (error)
		return error;

	error = relocate_restore_code();
	if (error)
		return error;

	restore_image();
	return 0;
}

/*
 *	pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
	return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

#define MD5_DIGEST_SIZE 16

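/*
 * Architecture specific part of the hibernation image header.  The layout
 * must match between the image and boot kernels, which is what the magic
 * number below guards against.
 */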
struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
	u8 e820_digest[MD5_DIGEST_SIZE];
};

#define RESTORE_MAGIC	0x23456789ABCDEF01UL

#if IS_BUILTIN(CONFIG_CRYPTO_MD5)
/**
 * get_e820_md5 - calculate the MD5 digest of the given e820 table
 *
 * @table: the e820 table to be digested
 * @buf: buffer to store the MD5 digest in
 */
static int get_e820_md5(struct e820_table *table, void *buf)
{
	struct scatterlist sg;
	struct crypto_ahash *tfm;
	int size;
	int ret = 0;

	tfm = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return -ENOMEM;

	{
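		/*
		 * AHASH_REQUEST_ON_STACK declares the request variable, hence
		 * the separate block scope for it.
		 */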
		AHASH_REQUEST_ON_STACK(req, tfm);
		size = offsetof(struct e820_table, entries) + sizeof(struct e820_entry) * table->nr_entries;
		ahash_request_set_tfm(req, tfm);
		sg_init_one(&sg, (u8 *)table, size);
		ahash_request_set_callback(req, 0, NULL, NULL);
		ahash_request_set_crypt(req, &sg, buf, size);

		if (crypto_ahash_digest(req))
			ret = -EINVAL;
		ahash_request_zero(req);
	}
	crypto_free_ahash(tfm);

	return ret;
}

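/* Save a digest of the firmware memory map for the boot kernel to verify. */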
static void hibernation_e820_save(void *buf)
{
	get_e820_md5(e820_table_firmware, buf);
}

static bool hibernation_e820_mismatch(void *buf)
{
	int ret;
	u8 result[MD5_DIGEST_SIZE];

	memset(result, 0, MD5_DIGEST_SIZE);
	/* If the suspend kernel recorded no digest, skip the check. */
	if (!memcmp(result, buf, MD5_DIGEST_SIZE))
		return false;

	ret = get_e820_md5(e820_table_firmware, result);
	if (ret)
		return true;

	return memcmp(result, buf, MD5_DIGEST_SIZE) != 0;
}
#else
static void hibernation_e820_save(void *buf)
{
}

static bool hibernation_e820_mismatch(void *buf)
{
	/* If MD5 is not built into the restore kernel, skip the check. */
	return false;
}
#endif

/**
 *	arch_hibernation_header_save - populate the architecture specific part
 *		of a hibernation image header
 *	@addr: address to save the data at
 *	@max_size: maximum size of the architecture specific data
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->jump_address = (unsigned long)&restore_registers;
	rdr->jump_address_phys = __pa_symbol(&restore_registers);
	rdr->cr3 = restore_cr3;
	rdr->magic = RESTORE_MAGIC;

	hibernation_e820_save(rdr->e820_digest);

	return 0;
}

/**
 *	arch_hibernation_header_restore - read the architecture specific data
 *		from the hibernation image header
 *	@addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	if (rdr->magic != RESTORE_MAGIC) {
		pr_crit("Unrecognized hibernate image header format!\n");
		return -EINVAL;
	}

	if (hibernation_e820_mismatch(rdr->e820_digest)) {
		pr_crit("Hibernate inconsistent memory map detected!\n");
		return -ENODEV;
	}

	return 0;
}