/*
 *    Page Deallocation Table (PDT) support
 *
 *    The Page Deallocation Table (PDT) is maintained by firmware and holds
 *    the addresses of bad memory pages (e.g. in broken RAM modules).
 *
 *    Copyright 2017 by Helge Deller <deller@gmx.de>
 *
 *    TODO:
 *    - regularly check for new bad memory
 *    - add userspace interface with procfs or sysfs
 *    - increase number of PDT entries dynamically
 */

#include <linux/memblock.h>
#include <linux/seq_file.h>

#include <asm/pdc.h>
#include <asm/pdcpat.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

enum pdt_access_type {
	PDT_NONE,	/* no PDT support / not initialized */
	PDT_PDC,	/* PDT read via pdc_mem_pdt_read_entries() */
	PDT_PAT_NEW,	/* PDT read via pdc_pat_mem_read_cell_pdt() */
	PDT_PAT_OLD	/* fallback via pdc_pat_mem_read_pd_pdt() */
};

static enum pdt_access_type pdt_type;

/* global PDT status information */
static struct pdc_mem_retinfo pdt_status;

#define MAX_PDT_TABLE_SIZE	PAGE_SIZE
#define MAX_PDT_ENTRIES		(MAX_PDT_TABLE_SIZE / sizeof(unsigned long))
static unsigned long pdt_entry[MAX_PDT_ENTRIES] __page_aligned_bss;
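
/*
 * Each PDT entry encodes the physical address of a bad page in its upper
 * bits and the error type in bit 0 (see the report loop in pdc_pdt_init()
 * below).  A minimal decoding sketch; the helper names are illustrative
 * and not part of the firmware interface.
 */
static inline unsigned long pdt_entry_paddr(unsigned long entry)
{
	return entry & PAGE_MASK;	/* physical address of the bad page */
}

static inline unsigned long pdt_entry_errtype(unsigned long entry)
{
	return entry & 1UL;		/* error-type bit of the entry */
}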


/* report PDT entries via /proc/meminfo */
void arch_report_meminfo(struct seq_file *m)
{
	if (pdt_type == PDT_NONE)
		return;

	seq_printf(m, "PDT_max_entries: %7lu\n",
			pdt_status.pdt_size);
	seq_printf(m, "PDT_cur_entries: %7lu\n",
			pdt_status.pdt_entries);
}
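
/*
 * Example /proc/meminfo output produced by the above (entry counts are
 * illustrative):
 *
 *	PDT_max_entries:      50
 *	PDT_cur_entries:       3
 */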

/*
 * pdc_pdt_init()
 *
 * Initialize kernel PDT structures, read initial PDT table from firmware,
 * report all current PDT entries and mark the bad memory with
 * memblock_reserve() so that the kernel does not use broken memory areas.
 */
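/*
 * On a machine with a populated PDT, the printks below produce boot log
 * lines roughly like these (addresses and counts are illustrative):
 *
 *	PDT: size 50, entries 2, status 0, dbe_loc 0x0, good_mem 1048576
 *	PDT: Firmware reports 2 entries of faulty memory:
 *	PDT: BAD PAGE #0 at 0x07f12000 (error_type = 1)
 *	PDT: BAD PAGE #1 at 0x2c43a000 (error_type = 0)
 */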
void __init pdc_pdt_init(void)
{
	int ret, i;
	unsigned long entries;
	struct pdc_mem_read_pdt pdt_read_ret;

	if (is_pdc_pat()) {
		struct pdc_pat_mem_retinfo pat_rinfo;

		pdt_type = PDT_PAT_NEW;
		ret = pdc_pat_mem_pdt_info(&pat_rinfo);
		pdt_status.pdt_size = pat_rinfo.max_pdt_entries;
		pdt_status.pdt_entries = pat_rinfo.current_pdt_entries;
		pdt_status.pdt_status = 0;
		pdt_status.first_dbe_loc = pat_rinfo.first_dbe_loc;
		pdt_status.good_mem = pat_rinfo.good_mem;
	} else {
		pdt_type = PDT_PDC;
		ret = pdc_mem_pdt_info(&pdt_status);
	}

	if (ret != PDC_OK) {
		pdt_type = PDT_NONE;
		pr_info("PDT: Firmware does not provide any page deallocation information.\n");
		return;
	}

	entries = pdt_status.pdt_entries;
	if (WARN_ON(entries > MAX_PDT_ENTRIES))
		entries = pdt_status.pdt_entries = MAX_PDT_ENTRIES;

	pr_info("PDT: size %lu, entries %lu, status %lu, dbe_loc 0x%lx,"
		" good_mem %lu\n",
			pdt_status.pdt_size, pdt_status.pdt_entries,
			pdt_status.pdt_status, pdt_status.first_dbe_loc,
			pdt_status.good_mem);

	if (entries == 0) {
		pr_info("PDT: Firmware reports all memory OK.\n");
		return;
	}

	/* Is the first double-bit error (DBE) inside the kernel image? */
	if (pdt_status.first_dbe_loc &&
		pdt_status.first_dbe_loc <= __pa((unsigned long)&_end))
		pr_crit("CRITICAL: Bad memory inside kernel image memory area!\n");

	pr_warn("PDT: Firmware reports %lu entries of faulty memory:\n",
		entries);

	if (pdt_type == PDT_PDC) {
		ret = pdc_mem_pdt_read_entries(&pdt_read_ret, pdt_entry);
	} else {
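		/* PAT firmware calls are only available on 64-bit kernels */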
#ifdef CONFIG_64BIT
		struct pdc_pat_mem_read_pd_retinfo pat_pret;

		ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
			MAX_PDT_ENTRIES);
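
		/* fall back to the older PAT firmware call if the newer one fails */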
		if (ret != PDC_OK) {
			pdt_type = PDT_PAT_OLD;
			ret = pdc_pat_mem_read_pd_pdt(&pat_pret, pdt_entry,
				MAX_PDT_TABLE_SIZE, 0);
		}
#else
		ret = PDC_BAD_PROC;
#endif
	}

	if (ret != PDC_OK) {
		pr_debug("PDT type %d, retval = %d\n", pdt_type, ret);
		pdt_type = PDT_NONE;
		return;
	}

	for (i = 0; i < entries; i++) {
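		/* limit console output to the first 20 bad pages */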
		if (i < 20)
			pr_warn("PDT: BAD PAGE #%d at 0x%08lx (error_type = %lu)\n",
				i,
				pdt_entry[i] & PAGE_MASK,
				pdt_entry[i] & 1);

		/* mark memory page bad */
		memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
	}
}