Commit d4f4cf77 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM updates from Russell King:

 - nommu updates from Afzal Mohammed cleaning up the vectors support

 - allow DMA memory "mapping" for nommu, from Benjamin Gaignard

 - fixing a correctness issue with R_ARM_PREL31 relocations in the
   module linker

 - add strlen() prototype for the decompressor

 - support for DEBUG_VIRTUAL from Florian Fainelli

 - adjusting memory bounds after memory reservations have been
   registered

 - uniphier cache handling updates from Masahiro Yamada

 - initrd and Thumb Kconfig cleanups

* 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (23 commits)
  ARM: mm: round the initrd reservation to page boundaries
  ARM: mm: clean up initrd initialisation
  ARM: mm: move initrd init code out of arm_memblock_init()
  ARM: 8655/1: improve NOMMU definition of pgprot_*()
  ARM: 8654/1: decompressor: add strlen prototype
  ARM: 8652/1: cache-uniphier: clean up active way setup code
  ARM: 8651/1: cache-uniphier: include <linux/errno.h> instead of <linux/types.h>
  ARM: 8650/1: module: handle negative R_ARM_PREL31 addends correctly
  ARM: 8649/2: nommu: remove Hivecs configuration is asm
  ARM: 8648/2: nommu: display vectors base
  ARM: 8647/2: nommu: dynamic exception base address setting
  ARM: 8646/1: mmu: decouple VECTORS_BASE from Kconfig
  ARM: 8644/1: Reduce "CPU: shutdown" message to debug level
  ARM: 8641/1: treewide: Replace uses of virt_to_phys with __pa_symbol
  ARM: 8640/1: Add support for CONFIG_DEBUG_VIRTUAL
  ARM: 8639/1: Define KERNEL_START and KERNEL_END
  ARM: 8638/1: mtd: lart: Rename partition defines to be prefixed with PART_
  ARM: 8637/1: Adjust memory boundaries after reservations
  ARM: 8636/1: Cleanup sanity_check_meminfo
  ARM: add CPU_THUMB_CAPABLE to indicate possible Thumb support
  ...
parents f89db789 17a870be
...@@ -2,6 +2,7 @@ config ARM ...@@ -2,6 +2,7 @@ config ARM
bool bool
default y default y
select ARCH_CLOCKSOURCE_DATA select ARCH_CLOCKSOURCE_DATA
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEVMEM_IS_ALLOWED select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_SET_MEMORY select ARCH_HAS_SET_MEMORY
......
...@@ -34,8 +34,7 @@ config PROCESSOR_ID ...@@ -34,8 +34,7 @@ config PROCESSOR_ID
used instead of the auto-probing which utilizes the register. used instead of the auto-probing which utilizes the register.
config REMAP_VECTORS_TO_RAM config REMAP_VECTORS_TO_RAM
bool 'Install vectors to the beginning of RAM' if DRAM_BASE bool 'Install vectors to the beginning of RAM'
depends on DRAM_BASE
help help
The kernel needs to change the hardware exception vectors. The kernel needs to change the hardware exception vectors.
In nommu mode, the hardware exception vectors are normally In nommu mode, the hardware exception vectors are normally
......
...@@ -32,6 +32,7 @@ extern void error(char *); ...@@ -32,6 +32,7 @@ extern void error(char *);
/* Not needed, but used in some headers pulled in by decompressors */ /* Not needed, but used in some headers pulled in by decompressors */
extern char * strstr(const char * s1, const char *s2); extern char * strstr(const char * s1, const char *s2);
extern size_t strlen(const char *s);
#ifdef CONFIG_KERNEL_GZIP #ifdef CONFIG_KERNEL_GZIP
#include "../../../../lib/decompress_inflate.c" #include "../../../../lib/decompress_inflate.c"
......
...@@ -144,7 +144,7 @@ extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER]; ...@@ -144,7 +144,7 @@ extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr) void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{ {
unsigned long val = ptr ? virt_to_phys(ptr) : 0; unsigned long val = ptr ? __pa_symbol(ptr) : 0;
mcpm_entry_vectors[cluster][cpu] = val; mcpm_entry_vectors[cluster][cpu] = val;
sync_cache_w(&mcpm_entry_vectors[cluster][cpu]); sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
} }
...@@ -299,8 +299,8 @@ void mcpm_cpu_power_down(void) ...@@ -299,8 +299,8 @@ void mcpm_cpu_power_down(void)
* the kernel as if the power_up method just had deasserted reset * the kernel as if the power_up method just had deasserted reset
* on the CPU. * on the CPU.
*/ */
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
phys_reset(virt_to_phys(mcpm_entry_point)); phys_reset(__pa_symbol(mcpm_entry_point));
/* should never get here */ /* should never get here */
BUG(); BUG();
...@@ -388,8 +388,8 @@ static int __init nocache_trampoline(unsigned long _arg) ...@@ -388,8 +388,8 @@ static int __init nocache_trampoline(unsigned long _arg)
__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN); __mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
__mcpm_cpu_down(cpu, cluster); __mcpm_cpu_down(cpu, cluster);
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset); phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
phys_reset(virt_to_phys(mcpm_entry_point)); phys_reset(__pa_symbol(mcpm_entry_point));
BUG(); BUG();
} }
...@@ -449,7 +449,7 @@ int __init mcpm_sync_init( ...@@ -449,7 +449,7 @@ int __init mcpm_sync_init(
sync_cache_w(&mcpm_sync); sync_cache_w(&mcpm_sync);
if (power_up_setup) { if (power_up_setup) {
mcpm_power_up_setup_phys = virt_to_phys(power_up_setup); mcpm_power_up_setup_phys = __pa_symbol(power_up_setup);
sync_cache_w(&mcpm_power_up_setup_phys); sync_cache_w(&mcpm_power_up_setup_phys);
} }
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#ifndef __CACHE_UNIPHIER_H #ifndef __CACHE_UNIPHIER_H
#define __CACHE_UNIPHIER_H #define __CACHE_UNIPHIER_H
#include <linux/types.h> #include <linux/errno.h>
#ifdef CONFIG_CACHE_UNIPHIER #ifdef CONFIG_CACHE_UNIPHIER
int uniphier_cache_init(void); int uniphier_cache_init(void);
......
...@@ -83,8 +83,15 @@ ...@@ -83,8 +83,15 @@
#define IOREMAP_MAX_ORDER 24 #define IOREMAP_MAX_ORDER 24
#endif #endif
#define VECTORS_BASE UL(0xffff0000)
#else /* CONFIG_MMU */ #else /* CONFIG_MMU */
#ifndef __ASSEMBLY__
extern unsigned long vectors_base;
#define VECTORS_BASE vectors_base
#endif
/* /*
* The limitation of user task size can grow up to the end of free ram region. * The limitation of user task size can grow up to the end of free ram region.
* It is difficult to define and perhaps will never meet the original meaning * It is difficult to define and perhaps will never meet the original meaning
...@@ -111,6 +118,13 @@ ...@@ -111,6 +118,13 @@
#endif /* !CONFIG_MMU */ #endif /* !CONFIG_MMU */
#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START _sdata
#else
#define KERNEL_START _stext
#endif
#define KERNEL_END _end
/* /*
* We fix the TCM memories max 32 KiB ITCM resp DTCM at these * We fix the TCM memories max 32 KiB ITCM resp DTCM at these
* locations * locations
...@@ -206,7 +220,7 @@ extern const void *__pv_table_begin, *__pv_table_end; ...@@ -206,7 +220,7 @@ extern const void *__pv_table_begin, *__pv_table_end;
: "r" (x), "I" (__PV_BITS_31_24) \ : "r" (x), "I" (__PV_BITS_31_24) \
: "cc") : "cc")
static inline phys_addr_t __virt_to_phys(unsigned long x) static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{ {
phys_addr_t t; phys_addr_t t;
...@@ -238,7 +252,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) ...@@ -238,7 +252,7 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#define PHYS_OFFSET PLAT_PHYS_OFFSET #define PHYS_OFFSET PLAT_PHYS_OFFSET
#define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT)) #define PHYS_PFN_OFFSET ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
static inline phys_addr_t __virt_to_phys(unsigned long x) static inline phys_addr_t __virt_to_phys_nodebug(unsigned long x)
{ {
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET; return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
} }
...@@ -254,6 +268,16 @@ static inline unsigned long __phys_to_virt(phys_addr_t x) ...@@ -254,6 +268,16 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \ ((((unsigned long)(kaddr) - PAGE_OFFSET) >> PAGE_SHIFT) + \
PHYS_PFN_OFFSET) PHYS_PFN_OFFSET)
#define __pa_symbol_nodebug(x) __virt_to_phys_nodebug((x))
#ifdef CONFIG_DEBUG_VIRTUAL
extern phys_addr_t __virt_to_phys(unsigned long x);
extern phys_addr_t __phys_addr_symbol(unsigned long x);
#else
#define __virt_to_phys(x) __virt_to_phys_nodebug(x)
#define __phys_addr_symbol(x) __pa_symbol_nodebug(x)
#endif
/* /*
* These are *only* valid on the kernel direct mapped RAM memory. * These are *only* valid on the kernel direct mapped RAM memory.
* Note: Drivers should NOT use these. They are the wrong * Note: Drivers should NOT use these. They are the wrong
...@@ -276,6 +300,7 @@ static inline void *phys_to_virt(phys_addr_t x) ...@@ -276,6 +300,7 @@ static inline void *phys_to_virt(phys_addr_t x)
* Drivers should NOT use these either. * Drivers should NOT use these either.
*/ */
#define __pa(x) __virt_to_phys((unsigned long)(x)) #define __pa(x) __virt_to_phys((unsigned long)(x))
#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT) #define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
......
...@@ -63,9 +63,9 @@ typedef pte_t *pte_addr_t; ...@@ -63,9 +63,9 @@ typedef pte_t *pte_addr_t;
/* /*
* Mark the prot value as uncacheable and unbufferable. * Mark the prot value as uncacheable and unbufferable.
*/ */
#define pgprot_noncached(prot) __pgprot(0) #define pgprot_noncached(prot) (prot)
#define pgprot_writecombine(prot) __pgprot(0) #define pgprot_writecombine(prot) (prot)
#define pgprot_dmacoherent(prot) __pgprot(0) #define pgprot_dmacoherent(prot) (prot)
/* /*
......
...@@ -151,11 +151,6 @@ __after_proc_init: ...@@ -151,11 +151,6 @@ __after_proc_init:
#endif #endif
#ifdef CONFIG_CPU_ICACHE_DISABLE #ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I bic r0, r0, #CR_I
#endif
#ifdef CONFIG_CPU_HIGH_VECTOR
orr r0, r0, #CR_V
#else
bic r0, r0, #CR_V
#endif #endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg mcr p15, 0, r0, c1, c0, 0 @ write control reg
#elif defined (CONFIG_CPU_V7M) #elif defined (CONFIG_CPU_V7M)
......
...@@ -155,8 +155,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, ...@@ -155,8 +155,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
break; break;
case R_ARM_PREL31: case R_ARM_PREL31:
offset = *(u32 *)loc + sym->st_value - loc; offset = (*(s32 *)loc << 1) >> 1; /* sign extend */
*(u32 *)loc = offset & 0x7fffffff; offset += sym->st_value - loc;
if (offset >= 0x40000000 || offset < -0x40000000) {
pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
module->name, relindex, i, symname,
ELF32_R_TYPE(rel->r_info), loc,
sym->st_value);
return -ENOEXEC;
}
*(u32 *)loc &= 0x80000000;
*(u32 *)loc |= offset & 0x7fffffff;
break; break;
case R_ARM_MOVW_ABS_NC: case R_ARM_MOVW_ABS_NC:
......
...@@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup); ...@@ -81,7 +81,7 @@ __setup("fpe=", fpe_setup);
extern void init_default_cache_policy(unsigned long); extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc); extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *); extern void early_paging_init(const struct machine_desc *);
extern void sanity_check_meminfo(void); extern void adjust_lowmem_bounds(void);
extern enum reboot_mode reboot_mode; extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc); extern void setup_dma_zone(const struct machine_desc *desc);
...@@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p) ...@@ -1093,8 +1093,14 @@ void __init setup_arch(char **cmdline_p)
setup_dma_zone(mdesc); setup_dma_zone(mdesc);
xen_early_init(); xen_early_init();
efi_init(); efi_init();
sanity_check_meminfo(); /*
* Make sure the calculation for lowmem/highmem is set appropriately
* before reserving/allocating any memory	 * before reserving/allocating any memory
*/
adjust_lowmem_bounds();
arm_memblock_init(mdesc); arm_memblock_init(mdesc);
/* Memory may have been removed so recalculate the bounds. */
adjust_lowmem_bounds();
early_ioremap_reset(); early_ioremap_reset();
......
...@@ -251,7 +251,7 @@ void __cpu_die(unsigned int cpu) ...@@ -251,7 +251,7 @@ void __cpu_die(unsigned int cpu)
pr_err("CPU%u: cpu didn't die\n", cpu); pr_err("CPU%u: cpu didn't die\n", cpu);
return; return;
} }
pr_notice("CPU%u: shutdown\n", cpu); pr_debug("CPU%u: shutdown\n", cpu);
/* /*
* platform_cpu_kill() is generally expected to do the powering off * platform_cpu_kill() is generally expected to do the powering off
......
...@@ -27,7 +27,7 @@ static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle) ...@@ -27,7 +27,7 @@ static int alpine_boot_secondary(unsigned int cpu, struct task_struct *idle)
{ {
phys_addr_t addr; phys_addr_t addr;
addr = virt_to_phys(secondary_startup); addr = __pa_symbol(secondary_startup);
if (addr > (phys_addr_t)(uint32_t)(-1)) { if (addr > (phys_addr_t)(uint32_t)(-1)) {
pr_err("FAIL: resume address over 32bit (%pa)", &addr); pr_err("FAIL: resume address over 32bit (%pa)", &addr);
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
static void write_release_addr(u32 release_phys) static void write_release_addr(u32 release_phys)
{ {
u32 *virt = (u32 *) phys_to_virt(release_phys); u32 *virt = (u32 *) phys_to_virt(release_phys);
writel_relaxed(virt_to_phys(secondary_startup), virt); writel_relaxed(__pa_symbol(secondary_startup), virt);
/* Make sure this store is visible to other CPUs */ /* Make sure this store is visible to other CPUs */
smp_wmb(); smp_wmb();
__cpuc_flush_dcache_area(virt, sizeof(u32)); __cpuc_flush_dcache_area(virt, sizeof(u32));
......
...@@ -135,7 +135,7 @@ static int bcm63138_smp_boot_secondary(unsigned int cpu, ...@@ -135,7 +135,7 @@ static int bcm63138_smp_boot_secondary(unsigned int cpu,
} }
/* Write the secondary init routine to the BootLUT reset vector */ /* Write the secondary init routine to the BootLUT reset vector */
val = virt_to_phys(secondary_startup); val = __pa_symbol(secondary_startup);
writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT); writel_relaxed(val, bootlut_base + BOOTLUT_RESET_VECT);
/* Power up the core, will jump straight to its reset vector when we /* Power up the core, will jump straight to its reset vector when we
......
...@@ -151,7 +151,7 @@ static void brcmstb_cpu_boot(u32 cpu) ...@@ -151,7 +151,7 @@ static void brcmstb_cpu_boot(u32 cpu)
* Set the reset vector to point to the secondary_startup * Set the reset vector to point to the secondary_startup
* routine * routine
*/ */
cpu_set_boot_addr(cpu, virt_to_phys(secondary_startup)); cpu_set_boot_addr(cpu, __pa_symbol(secondary_startup));
/* Unhalt the cpu */ /* Unhalt the cpu */
cpu_rst_cfg_set(cpu, 0); cpu_rst_cfg_set(cpu, 0);
......
...@@ -116,7 +116,7 @@ static int nsp_write_lut(unsigned int cpu) ...@@ -116,7 +116,7 @@ static int nsp_write_lut(unsigned int cpu)
return -ENOMEM; return -ENOMEM;
} }
secondary_startup_phy = virt_to_phys(secondary_startup); secondary_startup_phy = __pa_symbol(secondary_startup);
BUG_ON(secondary_startup_phy > (phys_addr_t)U32_MAX); BUG_ON(secondary_startup_phy > (phys_addr_t)U32_MAX);
writel_relaxed(secondary_startup_phy, sku_rom_lut); writel_relaxed(secondary_startup_phy, sku_rom_lut);
...@@ -189,7 +189,7 @@ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle) ...@@ -189,7 +189,7 @@ static int kona_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Secondary cores will start in secondary_startup(), * Secondary cores will start in secondary_startup(),
* defined in "arch/arm/kernel/head.S" * defined in "arch/arm/kernel/head.S"
*/ */
boot_func = virt_to_phys(secondary_startup); boot_func = __pa_symbol(secondary_startup);
BUG_ON(boot_func & BOOT_ADDR_CPUID_MASK); BUG_ON(boot_func & BOOT_ADDR_CPUID_MASK);
BUG_ON(boot_func > (phys_addr_t)U32_MAX); BUG_ON(boot_func > (phys_addr_t)U32_MAX);
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/cp15.h> #include <asm/cp15.h>
#include <asm/memory.h>
#include <asm/smp_plat.h> #include <asm/smp_plat.h>
#include <asm/smp_scu.h> #include <asm/smp_scu.h>
...@@ -75,7 +76,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus) ...@@ -75,7 +76,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
if (!cpu_ctrl) if (!cpu_ctrl)
goto unmap_scu; goto unmap_scu;
vectors_base = ioremap(CONFIG_VECTORS_BASE, SZ_32K); vectors_base = ioremap(VECTORS_BASE, SZ_32K);
if (!vectors_base) if (!vectors_base)
goto unmap_scu; goto unmap_scu;
...@@ -92,7 +93,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus) ...@@ -92,7 +93,7 @@ static void __init berlin_smp_prepare_cpus(unsigned int max_cpus)
* Write the secondary startup address into the SW reset address * Write the secondary startup address into the SW reset address
* vector. This is used by boot_inst. * vector. This is used by boot_inst.
*/ */
writel(virt_to_phys(secondary_startup), vectors_base + SW_RESET_ADDR); writel(__pa_symbol(secondary_startup), vectors_base + SW_RESET_ADDR);
iounmap(vectors_base); iounmap(vectors_base);
unmap_scu: unmap_scu:
......
...@@ -41,7 +41,7 @@ static int exynos_do_idle(unsigned long mode) ...@@ -41,7 +41,7 @@ static int exynos_do_idle(unsigned long mode)
case FW_DO_IDLE_AFTR: case FW_DO_IDLE_AFTR:
if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9) if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
exynos_save_cp15(); exynos_save_cp15();
writel_relaxed(virt_to_phys(exynos_cpu_resume_ns), writel_relaxed(__pa_symbol(exynos_cpu_resume_ns),
sysram_ns_base_addr + 0x24); sysram_ns_base_addr + 0x24);
writel_relaxed(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20); writel_relaxed(EXYNOS_AFTR_MAGIC, sysram_ns_base_addr + 0x20);
if (soc_is_exynos3250()) { if (soc_is_exynos3250()) {
...@@ -135,7 +135,7 @@ static int exynos_suspend(void) ...@@ -135,7 +135,7 @@ static int exynos_suspend(void)
exynos_save_cp15(); exynos_save_cp15();
writel(EXYNOS_SLEEP_MAGIC, sysram_ns_base_addr + EXYNOS_BOOT_FLAG); writel(EXYNOS_SLEEP_MAGIC, sysram_ns_base_addr + EXYNOS_BOOT_FLAG);
writel(virt_to_phys(exynos_cpu_resume_ns), writel(__pa_symbol(exynos_cpu_resume_ns),
sysram_ns_base_addr + EXYNOS_BOOT_ADDR); sysram_ns_base_addr + EXYNOS_BOOT_ADDR);
return cpu_suspend(0, exynos_cpu_suspend); return cpu_suspend(0, exynos_cpu_suspend);
......
...@@ -221,7 +221,7 @@ static void exynos_mcpm_setup_entry_point(void) ...@@ -221,7 +221,7 @@ static void exynos_mcpm_setup_entry_point(void)
*/ */
__raw_writel(0xe59f0000, ns_sram_base_addr); /* ldr r0, [pc, #0] */ __raw_writel(0xe59f0000, ns_sram_base_addr); /* ldr r0, [pc, #0] */
__raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */ __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */
__raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8); __raw_writel(__pa_symbol(mcpm_entry_point), ns_sram_base_addr + 8);
} }
static struct syscore_ops exynos_mcpm_syscore_ops = { static struct syscore_ops exynos_mcpm_syscore_ops = {
......
...@@ -353,7 +353,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) ...@@ -353,7 +353,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
smp_rmb(); smp_rmb();
boot_addr = virt_to_phys(exynos4_secondary_startup); boot_addr = __pa_symbol(exynos4_secondary_startup);
ret = exynos_set_boot_addr(core_id, boot_addr); ret = exynos_set_boot_addr(core_id, boot_addr);
if (ret) if (ret)
...@@ -413,7 +413,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus) ...@@ -413,7 +413,7 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
mpidr = cpu_logical_map(i); mpidr = cpu_logical_map(i);
core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
boot_addr = virt_to_phys(exynos4_secondary_startup); boot_addr = __pa_symbol(exynos4_secondary_startup);
ret = exynos_set_boot_addr(core_id, boot_addr); ret = exynos_set_boot_addr(core_id, boot_addr);
if (ret) if (ret)
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment