/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *);
extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);

#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

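/*
 * Small per-CPU stacks for the IRQ, ABT, UND and FIQ exception modes;
 * cpu_init() points the banked stack pointers at these.
 */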
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
	u32 fiq[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

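/*
 * Decode the CPU architecture version (CPU_ARCH_*) from the CPUID
 * registers.  ARMv7-M parts do not implement the classic CPUID scheme,
 * so they get a trivial variant of this helper.
 */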
#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);

		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

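/*
 * Determine whether the L1 instruction cache can alias: on ARMv7 this
 * is the case when one cache way (line size * number of sets) is larger
 * than a page; ARMv6 reports it directly in the cache type register.
 */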
static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

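/*
 * Decode the cache type register into the global cacheid flags used by
 * the cache maintenance and mm code (VIVT/VIPT/PIPT, aliasing, etc.).
 */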
static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !cachetype) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

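/*
 * If the CPU implements hardware integer division, patch the library
 * __aeabi_uidiv/__aeabi_idiv helpers at boot so their first two
 * instructions become a real udiv/sdiv followed by a return.
 */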
#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

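/*
 * Probe the CPUID feature registers (ID_ISAR*, ID_MMFR0) on ARMv7+ and
 * set the matching HWCAP/HWCAP2 bits (idiv, LPAE, v8 crypto, CRC32).
 */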
static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
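/*
 * Map the booting CPU's MPIDR to logical CPU 0 so the boot CPU is always
 * cpu_logical_map(0), and give the remaining logical ids a default
 * 1:1 mapping.
 */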

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

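/*
 * Register a block of RAM with memblock, page-aligning it and clipping
 * it against PHYS_OFFSET and the 32-bit physical address limit.
 */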
int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

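/*
 * Register the standard memory and I/O resources: System RAM (and its
 * boot alias, if any), the kernel code/data regions, video RAM and the
 * legacy parallel port ranges claimed by the machine descriptor.
 */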
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		unsigned long boot_alias_start;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting.  We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_virt_alloc(sizeof(*res), 0);
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, SECTION_SIZE);
		if (start != crash_base) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

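/*
 * setup_arch - architecture-specific boot-time setup, called from
 * start_kernel().  Identifies the CPU and machine, parses the early
 * command line, initialises memory and paging, and registers the
 * standard resources.
 */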
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_paging_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

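/* HWCAP bit names, indexed by bit number, for the /proc/cpuinfo "Features" line */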
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

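/* seq_file show callback backing /proc/cpuinfo */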
static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);
