Commit d3511f53 authored by Linus Torvalds
Pull parisc updates from Helge Deller:
 "Many great new features, fixes and optimizations, including:

   - Convert page table updates to use per-pagetable spinlocks which
     overall improves performance on SMP machines a lot, by Mikulas
     Patocka

   - Kernel debugger (KGDB) support, by Sven Schnelle

   - KPROBES support, by Sven Schnelle

   - Lots of TLB lock/flush improvements, by Dave Anglin

   - Drop DISCONTIGMEM and switch to SPARSEMEM

   - Added JUMP_LABEL, branch runtime-patching support

   - Lots of other small speedups and cleanups, e.g. for QEMU, stack
     randomization, avoidance of name clashes, documentation updates,
     etc ..."

* 'parisc-5.2-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux: (28 commits)
  parisc: Add static branch and JUMP_LABEL feature
  parisc: Use PA_ASM_LEVEL in boot code
  parisc: Rename LEVEL to PA_ASM_LEVEL to avoid name clash with DRBD code
  parisc: Update huge TLB page support to use per-pagetable spinlock
  parisc: Use per-pagetable spinlock
  parisc: Allow live-patching of __meminit functions
  parisc: Add memory barrier to asm pdc and sync instructions
  parisc: Add memory clobber to TLB purges
  parisc: Use ldcw instruction for SMP spinlock release barrier
  parisc: Remove lock code to serialize TLB operations in pacache.S
  parisc: Switch from DISCONTIGMEM to SPARSEMEM
  parisc: enable wide mode early
  parisc: update feature lists
  parisc: Show n/a if product number not available
  parisc: remove unused flags parameter in __patch_text()
  doc: update kprobes supported architecture list
  parisc: Implement kretprobes
  parisc: remove kprobes.h from generic-y
  parisc: Implement kprobes
  parisc: add functions required by KPROBE_EVENTS
  ...
parents 02aff8db 62217beb
Showing 298 additions and 110 deletions
@@ -21,7 +21,7 @@
 | nds32: | TODO |
 | nios2: | ok |
 | openrisc: | TODO |
-| parisc: | TODO |
+| parisc: | ok |
 | powerpc: | ok |
 | riscv: | TODO |
 | s390: | TODO |
......
@@ -21,7 +21,7 @@
 | nds32: | TODO |
 | nios2: | TODO |
 | openrisc: | TODO |
-| parisc: | TODO |
+| parisc: | ok |
 | powerpc: | ok |
 | riscv: | ok |
 | s390: | ok |
......
@@ -21,7 +21,7 @@
 | nds32: | TODO |
 | nios2: | TODO |
 | openrisc: | TODO |
-| parisc: | TODO |
+| parisc: | ok |
 | powerpc: | ok |
 | riscv: | TODO |
 | s390: | ok |
......
@@ -321,6 +321,7 @@ architectures:
 - ppc
 - mips
 - s390
+- parisc

 Configuring Kprobes
 ===================
......
@@ -36,6 +36,7 @@ config PARISC
 	select GENERIC_STRNCPY_FROM_USER
 	select SYSCTL_ARCH_UNALIGN_ALLOW
 	select SYSCTL_EXCEPTION_TRACE
+	select ARCH_DISCARD_MEMBLOCK
 	select HAVE_MOD_ARCH_SPECIFIC
 	select VIRT_TO_BUS
 	select MODULES_USE_ELF_RELA
@@ -44,6 +45,8 @@ config PARISC
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HASH
+	select HAVE_ARCH_JUMP_LABEL
+	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_REGS_AND_STACK_ACCESS_API
@@ -54,6 +57,9 @@ config PARISC
 	select CPU_NO_EFFICIENT_FFS
 	select NEED_DMA_MAP_STATE
 	select NEED_SG_DMA_LENGTH
+	select HAVE_ARCH_KGDB
+	select HAVE_KPROBES
+	select HAVE_KRETPROBES
 	help
 	  The PA-RISC microprocessor is designed by Hewlett-Packard and used
@@ -305,21 +311,16 @@ config ARCH_SELECT_MEMORY_MODEL
 	def_bool y
 	depends on 64BIT

-config ARCH_DISCONTIGMEM_ENABLE
+config ARCH_SPARSEMEM_ENABLE
 	def_bool y
 	depends on 64BIT

 config ARCH_FLATMEM_ENABLE
 	def_bool y

-config ARCH_DISCONTIGMEM_DEFAULT
+config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
-	depends on ARCH_DISCONTIGMEM_ENABLE
-
-config NODES_SHIFT
-	int
-	default "3"
-	depends on NEED_MULTIPLE_NODES
+	depends on ARCH_SPARSEMEM_ENABLE

 source "kernel/Kconfig.hz"
......
@@ -22,7 +22,7 @@
 	__HEAD
 ENTRY(startup)
-	.level LEVEL
+	.level PA_ASM_LEVEL

 #define PSW_W_SM	0x200
 #define PSW_W_BIT	36
@@ -63,7 +63,7 @@ $bss_loop:
 	load32	BOOTADDR(decompress_kernel),%r3

 #ifdef CONFIG_64BIT
-	.level LEVEL
+	.level PA_ASM_LEVEL
 	ssm	PSW_W_SM, %r0	/* set W-bit */
 	depdi	0, 31, 32, %r3
 #endif
@@ -72,7 +72,7 @@ $bss_loop:
 startup_continue:
 #ifdef CONFIG_64BIT
-	.level LEVEL
+	.level PA_ASM_LEVEL
 	rsm	PSW_W_SM, %r0	/* clear W-bit */
 #endif
......
@@ -145,14 +145,13 @@ static int putchar(int c)

 void __noreturn error(char *x)
 {
-	puts("\n\n");
-	puts(x);
-	puts("\n\n -- System halted");
+	if (x) puts(x);
+	puts("\n -- System halted\n");
 	while (1)	/* wait forever */
 		;
 }

-static int print_hex(unsigned long num)
+static int print_num(unsigned long num, int base)
 {
 	const char hex[] = "0123456789abcdef";
 	char str[40];
@@ -160,12 +159,14 @@ static int print_hex(unsigned long num)
 	str[i--] = '\0';

 	do {
-		str[i--] = hex[num & 0x0f];
-		num >>= 4;
+		str[i--] = hex[num % base];
+		num = num / base;
 	} while (num);

-	str[i--] = 'x';
-	str[i] = '0';
+	if (base == 16) {
+		str[i--] = 'x';
+		str[i] = '0';
+	} else i++;

 	puts(&str[i]);
 	return 0;
@@ -187,8 +188,9 @@ int printf(const char *fmt, ...)
 		if (fmt[++i] == '%')
 			goto put;
+		print_num(va_arg(args, unsigned long),
+			  fmt[i] == 'x' ? 16:10);
 		++i;
-		print_hex(va_arg(args, unsigned long));
 	}
 	va_end(args);
@@ -327,8 +329,15 @@ unsigned long decompress_kernel(unsigned int started_wide,
 	free_mem_end_ptr = rd_start;
 #endif

-	if (free_mem_ptr >= free_mem_end_ptr)
-		error("Kernel too big for machine.");
+	if (free_mem_ptr >= free_mem_end_ptr) {
+		int free_ram;
+		free_ram = (free_mem_ptr >> 20) + 1;
+		if (free_ram < 32)
+			free_ram = 32;
+		printf("\nKernel requires at least %d MB RAM.\n",
+			free_ram);
+		error(NULL);
+	}

 #ifdef DEBUG
 	printf("\n");
......
@@ -10,7 +10,6 @@ generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
 generic-y += kdebug.h
-generic-y += kprobes.h
 generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
......
@@ -61,14 +61,14 @@
 #define LDCW		ldcw,co
 #define BL		b,l
 # ifdef CONFIG_64BIT
-#  define LEVEL		2.0w
+#  define PA_ASM_LEVEL	2.0w
 # else
-#  define LEVEL		2.0
+#  define PA_ASM_LEVEL	2.0
 # endif
 #else
 #define LDCW		ldcw
 #define BL		bl
-#define LEVEL		1.1
+#define PA_ASM_LEVEL	1.1
 #endif

 #ifdef __ASSEMBLY__
......
@@ -44,22 +44,22 @@ void parisc_setup_cache_timing(void);

 #define pdtlb(addr)	asm volatile("pdtlb 0(%%sr1,%0)" \
 			ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
-			: : "r" (addr))
+			: : "r" (addr) : "memory")
 #define pitlb(addr)	asm volatile("pitlb 0(%%sr1,%0)" \
 			ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
 			ALTERNATIVE(ALT_COND_NO_SPLIT_TLB, INSN_NOP) \
-			: : "r" (addr))
+			: : "r" (addr) : "memory")
 #define pdtlb_kernel(addr)	asm volatile("pdtlb 0(%0)" \
 			ALTERNATIVE(ALT_COND_NO_SMP, INSN_PxTLB) \
-			: : "r" (addr))
+			: : "r" (addr) : "memory")

 #define asm_io_fdc(addr)	asm volatile("fdc %%r0(%0)" \
 			ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
 			ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) \
-			: : "r" (addr))
+			: : "r" (addr) : "memory")
 #define asm_io_sync()	asm volatile("sync" \
 			ALTERNATIVE(ALT_COND_NO_DCACHE, INSN_NOP) \
-			ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :: )
+			ALTERNATIVE(ALT_COND_NO_IOC_FDC, INSN_NOP) :::"memory")

 #endif /* ! __ASSEMBLY__ */
......
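The added "memory" clobbers matter because these purge macros are used right next to PTE stores: without a compiler barrier, the store could be kept in a register or reordered past the purge. A minimal illustration of the pattern the clobber protects (not from this patch; the function name is made up, and it only compiles for parisc):

/* Illustration only: the PTE update must be visible before the purge,
 * so the inline asm carries a "memory" clobber as a compiler barrier. */
static inline void demo_clear_and_purge(unsigned long *ptep, unsigned long addr)
{
	*ptep = 0;				/* PTE update ...              */
	asm volatile("pdtlb 0(%%sr1,%0)"	/* ... must not be moved past  */
		     : : "r" (addr) : "memory");/* this purge by the compiler  */
}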
@@ -15,17 +15,34 @@
  * from areas congruently mapped with user space. It is 8MB large
  * and must be 16MB aligned */
 #define TMPALIAS_MAP_START	((__PAGE_OFFSET) - 16*1024*1024)
+
+#define FIXMAP_SIZE		(FIX_BITMAP_COUNT << PAGE_SHIFT)
+#define FIXMAP_START		(TMPALIAS_MAP_START - FIXMAP_SIZE)
 /* This is the kernel area for all maps (vmalloc, dma etc.)  most
  * usually, it extends up to TMPALIAS_MAP_START.  Virtual addresses
  * 0..GATEWAY_PAGE_SIZE are reserved for the gateway page */
 #define KERNEL_MAP_START	(GATEWAY_PAGE_SIZE)
-#define KERNEL_MAP_END		(TMPALIAS_MAP_START)
+#define KERNEL_MAP_END		(FIXMAP_START)

 #ifndef __ASSEMBLY__
+
+enum fixed_addresses {
+	/* Support writing RO kernel text via kprobes, jump labels, etc. */
+	FIX_TEXT_POKE0,
+	FIX_BITMAP_COUNT
+};
+
 extern void *parisc_vmalloc_start;
 #define PCXL_DMA_MAP_SIZE	(8*1024*1024)
 #define VMALLOC_START		((unsigned long)parisc_vmalloc_start)
 #define VMALLOC_END		(KERNEL_MAP_END)
+
+#define __fix_to_virt(_x)	(FIXMAP_START + ((_x) << PAGE_SHIFT))
+
+void set_fixmap(enum fixed_addresses idx, phys_addr_t phys);
+void clear_fixmap(enum fixed_addresses idx);
+
 #endif /*__ASSEMBLY__*/

 #endif /*_ASM_FIXMAP_H*/
@@ -120,7 +120,7 @@ extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
 extern void init_parisc_bus(void);
 extern struct device *hwpath_to_device(struct hardware_path *modpath);
 extern void device_to_hwpath(struct device *dev, struct hardware_path *path);
+extern int machine_has_merced_bus(void);

 /* inventory.c: */
 extern void do_memory_inventory(void);
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_PARISC_JUMP_LABEL_H
#define _ASM_PARISC_JUMP_LABEL_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/assembly.h>

#define JUMP_LABEL_NOP_SIZE 4

static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
	asm_volatile_goto("1:\n\t"
		 "nop\n\t"
		 ".pushsection __jump_table, \"aw\"\n\t"
		 ".word 1b - ., %l[l_yes] - .\n\t"
		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
		 ".popsection\n\t"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
	asm_volatile_goto("1:\n\t"
		 "b,n %l[l_yes]\n\t"
		 ".pushsection __jump_table, \"aw\"\n\t"
		 ".word 1b - ., %l[l_yes] - .\n\t"
		 __stringify(ASM_ULONG_INSN) " %c0 - .\n\t"
		 ".popsection\n\t"
		 : : "i" (&((char *)key)[branch]) : : l_yes);

	return false;
l_yes:
	return true;
}

#endif /* __ASSEMBLY__ */
#endif
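For context, these two stubs are not called directly; they are consumed through the generic static-key API, which patches the emitted NOP into a branch at runtime. A minimal usage sketch (the key name and functions around it are hypothetical, not part of this patch):

#include <linux/jump_label.h>
#include <linux/printk.h>

/* Hypothetical example key, shown only for illustration. */
static DEFINE_STATIC_KEY_FALSE(parisc_demo_key);

void demo_hot_path(void)
{
	/* Compiles down to the single NOP emitted by arch_static_branch();
	 * enabling the key runtime-patches it into a branch to the body. */
	if (static_branch_unlikely(&parisc_demo_key))
		pr_info("demo feature enabled\n");
}

void demo_feature_enable(void)
{
	static_branch_enable(&parisc_demo_key);
}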
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * PA-RISC KGDB support
 *
 * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
 *
 */

#ifndef __PARISC_KGDB_H__
#define __PARISC_KGDB_H__

#define BREAK_INSTR_SIZE		4
#define PARISC_KGDB_COMPILED_BREAK_INSN	0x3ffc01f
#define PARISC_KGDB_BREAK_INSN		0x3ffa01f

#define NUMREGBYTES			sizeof(struct parisc_gdb_regs)
#define BUFMAX				4096

#define CACHE_FLUSH_IS_SAFE		1

#ifndef __ASSEMBLY__

static inline void arch_kgdb_breakpoint(void)
{
	asm(".word %0" : : "i"(PARISC_KGDB_COMPILED_BREAK_INSN) : "memory");
}

struct parisc_gdb_regs {
	unsigned long gpr[32];
	unsigned long sar;
	unsigned long iaoq_f;
	unsigned long iasq_f;
	unsigned long iaoq_b;
	unsigned long iasq_b;
	unsigned long eiem;
	unsigned long iir;
	unsigned long isr;
	unsigned long ior;
	unsigned long ipsw;
	unsigned long __unused0;
	unsigned long sr4;
	unsigned long sr0;
	unsigned long sr1;
	unsigned long sr2;
	unsigned long sr3;
	unsigned long sr5;
	unsigned long sr6;
	unsigned long sr7;
	unsigned long cr0;
	unsigned long pid1;
	unsigned long pid2;
	unsigned long scrccr;
	unsigned long pid3;
	unsigned long pid4;
	unsigned long cr24;
	unsigned long cr25;
	unsigned long cr26;
	unsigned long cr27;
	unsigned long cr28;
	unsigned long cr29;
	unsigned long cr30;

	u64 fr[32];
};

#endif
#endif
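The two break opcodes distinguish compiled-in breakpoints (arch_kgdb_breakpoint() above) from breakpoints the debugger plants dynamically. A hedged sketch of how kernel code drops into the debugger once KGDB is configured (for example with kgdboc on a serial console); the function name is made up:

#include <linux/kgdb.h>

/* Illustration only: with CONFIG_KGDB enabled and a debugger attached,
 * this traps into KGDB via the compiled break instruction defined above. */
static void demo_enter_debugger(void)
{
	kgdb_breakpoint();
}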
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * arch/parisc/include/asm/kprobes.h
 *
 * PA-RISC kprobes implementation
 *
 * Copyright (c) 2019 Sven Schnelle <svens@stackframe.org>
 */

#ifndef _PARISC_KPROBES_H
#define _PARISC_KPROBES_H

#ifdef CONFIG_KPROBES

#include <asm-generic/kprobes.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>

#define PARISC_KPROBES_BREAK_INSN	0x3ff801f
#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE 1

typedef u32 kprobe_opcode_t;
struct kprobe;

void arch_remove_kprobe(struct kprobe *p);

#define flush_insn_slot(p) \
	flush_icache_range((unsigned long)&(p)->ainsn.insn[0], \
			   (unsigned long)&(p)->ainsn.insn[0] + \
			   sizeof(kprobe_opcode_t))

#define kretprobe_blacklist_size	0

struct arch_specific_insn {
	kprobe_opcode_t *insn;
};

struct prev_kprobe {
	struct kprobe *kp;
	unsigned long status;
};

struct kprobe_ctlblk {
	unsigned int kprobe_status;
	struct prev_kprobe prev_kprobe;
	unsigned long iaoq[2];
};

int __kprobes parisc_kprobe_break_handler(struct pt_regs *regs);
int __kprobes parisc_kprobe_ss_handler(struct pt_regs *regs);
#endif /* CONFIG_KPROBES */
#endif /* _PARISC_KPROBES_H */
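As a rough illustration of how this new arch support is consumed, here is a hedged sketch using the generic kprobes API; the probed symbol and handler names are made up for the example and are not part of this series:

#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/module.h>

/* Hypothetical probe; the symbol choice is illustrative. */
static int demo_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("kprobe hit at %pS\n", (void *)instruction_pointer(regs));
	return 0;
}

static struct kprobe demo_kp = {
	.symbol_name = "do_sys_open",
	.pre_handler = demo_pre_handler,
};

static int __init demo_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");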
@@ -2,62 +2,6 @@
 #ifndef _PARISC_MMZONE_H
 #define _PARISC_MMZONE_H

-#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
-
-#ifdef CONFIG_DISCONTIGMEM
-
-extern int npmem_ranges;
-
-struct node_map_data {
-	pg_data_t pg_data;
-};
-
-extern struct node_map_data node_data[];
-
-#define NODE_DATA(nid)	(&node_data[nid].pg_data)
-
-/* We have these possible memory map layouts:
- * Astro: 0-3.75, 67.75-68, 4-64
- * zx1: 0-1, 257-260, 4-256
- * Stretch (N-class): 0-2, 4-32, 34-xxx
- */
-
-/* Since each 1GB can only belong to one region (node), we can create
- * an index table for pfn to nid lookup; each entry in pfnnid_map
- * represents 1GB, and contains the node that the memory belongs to. */
-
-#define PFNNID_SHIFT (30 - PAGE_SHIFT)
-#define PFNNID_MAP_MAX	512	/* support 512GB */
-extern signed char pfnnid_map[PFNNID_MAP_MAX];
-
-#ifndef CONFIG_64BIT
-#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
-#else
-/* io can be 0xf0f0f0f0f0xxxxxx or 0xfffffffff0000000 */
-#define pfn_is_io(pfn) ((pfn & (0xf000000000000000UL >> PAGE_SHIFT)) == (0xf000000000000000UL >> PAGE_SHIFT))
-#endif
-
-static inline int pfn_to_nid(unsigned long pfn)
-{
-	unsigned int i;
-
-	if (unlikely(pfn_is_io(pfn)))
-		return 0;
-
-	i = pfn >> PFNNID_SHIFT;
-	BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
-
-	return pfnnid_map[i];
-}
-
-static inline int pfn_valid(int pfn)
-{
-	int nid = pfn_to_nid(pfn);
-
-	if (nid >= 0)
-		return (pfn < node_end_pfn(nid));
-	return 0;
-}
-#endif
+#define MAX_PHYSMEM_RANGES 4 /* Fix the size for now (current known max is 3) */
 #endif /* _PARISC_MMZONE_H */
@@ -147,9 +147,9 @@ extern int npmem_ranges;
 #define __pa(x)			((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))

-#ifndef CONFIG_DISCONTIGMEM
+#ifndef CONFIG_SPARSEMEM
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
-#endif /* CONFIG_DISCONTIGMEM */
+#endif

 #ifdef CONFIG_HUGETLB_PAGE
 #define HPAGE_SHIFT		PMD_SHIFT /* fixed for transparent huge pages */
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PARISC_KERNEL_PATCH_H
#define _PARISC_KERNEL_PATCH_H

/* stop machine and patch kernel text */
void patch_text(void *addr, unsigned int insn);

/* patch kernel text with machine already stopped (e.g. in kgdb) */
void __patch_text(void *addr, unsigned int insn);

#endif
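patch_text() is the primitive that jump labels, kprobes and KGDB use to rewrite read-only kernel text: it stops the machine and writes through the FIX_TEXT_POKE0 fixmap added above. A hedged sketch of how a jump-label transform could sit on top of it (not the code added by this series; the function name, parameters and include path are illustrative):

#include <linux/jump_label.h>
#include "patch.h"	/* illustrative include of the header above */

/* Rough sketch: compute the instruction word for the requested state and
 * let patch_text() rewrite the branch site safely. */
static void demo_jump_label_set(struct jump_entry *entry, u32 nop_insn,
				u32 branch_insn, enum jump_label_type type)
{
	u32 insn = (type == JUMP_LABEL_JMP) ? branch_insn : nop_insn;

	patch_text((void *)jump_entry_code(entry), insn);
}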
@@ -41,6 +41,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
 #endif
 	}
+	spin_lock_init(pgd_spinlock(actual_pgd));
 	return actual_pgd;
 }
......
@@ -17,7 +17,7 @@
 #include <asm/processor.h>
 #include <asm/cache.h>

-extern spinlock_t pa_tlb_lock;
+static inline spinlock_t *pgd_spinlock(pgd_t *);

 /*
  * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
@@ -34,16 +34,46 @@ extern spinlock_t pa_tlb_lock;
  */
 #define kern_addr_valid(addr)	(1)

-/* Purge data and instruction TLB entries.  Must be called holding
- * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
- * machines since the purge must be broadcast to all CPUs.
+/* This is for the serialization of PxTLB broadcasts. At least on the N class
+ * systems, only one PxTLB inter processor broadcast can be active at any one
+ * time on the Merced bus.
+ * PTE updates are protected by locks in the PMD.
  */
+extern spinlock_t pa_tlb_flush_lock;
+extern spinlock_t pa_swapper_pg_lock;
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+extern int pa_serialize_tlb_flushes;
+#else
+#define pa_serialize_tlb_flushes	(0)
+#endif
+
+#define purge_tlb_start(flags)	do { \
+	if (pa_serialize_tlb_flushes)	\
+		spin_lock_irqsave(&pa_tlb_flush_lock, flags); \
+	else \
+		local_irq_save(flags);	\
+	} while (0)
+#define purge_tlb_end(flags)	do { \
+	if (pa_serialize_tlb_flushes)	\
+		spin_unlock_irqrestore(&pa_tlb_flush_lock, flags); \
+	else \
+		local_irq_restore(flags); \
+	} while (0)
+
+/* Purge data and instruction TLB entries. The TLB purge instructions
+ * are slow on SMP machines since the purge must be broadcast to all CPUs.
+ */
 static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 {
+	unsigned long flags;
+
+	purge_tlb_start(flags);
 	mtsp(mm->context, 1);
 	pdtlb(addr);
 	pitlb(addr);
+	purge_tlb_end(flags);
 }

 /* Certain architectures need to do special things when PTEs
@@ -59,11 +89,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 	do {							\
 		pte_t old_pte;					\
 		unsigned long flags;				\
-		spin_lock_irqsave(&pa_tlb_lock, flags);		\
+		spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
 		old_pte = *ptep;				\
 		set_pte(ptep, pteval);				\
 		purge_tlb_entries(mm, addr);			\
-		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
+		spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
 	} while (0)

 #endif /* !__ASSEMBLY__ */
@@ -88,10 +118,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #if CONFIG_PGTABLE_LEVELS == 3
 #define PGD_ORDER	1 /* Number of pages per pgd */
 #define PMD_ORDER	1 /* Number of pages per pmd */
-#define PGD_ALLOC_ORDER	2 /* first pgd contains pmd */
+#define PGD_ALLOC_ORDER	(2 + 1) /* first pgd contains pmd */
 #else
 #define PGD_ORDER	1 /* Number of pages per pgd */
-#define PGD_ALLOC_ORDER	PGD_ORDER
+#define PGD_ALLOC_ORDER	(PGD_ORDER + 1)
 #endif

 /* Definitions for 3rd level (we use PLD here for Page Lower directory
@@ -459,6 +489,15 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)		((pte_t) { (x).val })

+static inline spinlock_t *pgd_spinlock(pgd_t *pgd)
+{
+	if (unlikely(pgd == swapper_pg_dir))
+		return &pa_swapper_pg_lock;
+	return (spinlock_t *)((char *)pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1)));
+}
+
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
 	pte_t pte;
@@ -467,15 +506,15 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
 	if (!pte_young(*ptep))
 		return 0;

-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(vma->vm_mm->pgd), flags);
 	pte = *ptep;
 	if (!pte_young(pte)) {
-		spin_unlock_irqrestore(&pa_tlb_lock, flags);
+		spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
 		return 0;
 	}
 	set_pte(ptep, pte_mkold(pte));
 	purge_tlb_entries(vma->vm_mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(vma->vm_mm->pgd), flags);
 	return 1;
 }

@@ -485,11 +524,11 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 	pte_t old_pte;
 	unsigned long flags;

-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
 	old_pte = *ptep;
 	set_pte(ptep, __pte(0));
 	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);

 	return old_pte;
 }
@@ -497,10 +536,10 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&pa_tlb_lock, flags);
+	spin_lock_irqsave(pgd_spinlock(mm->pgd), flags);
 	set_pte(ptep, pte_wrprotect(*ptep));
 	purge_tlb_entries(mm, addr);
-	spin_unlock_irqrestore(&pa_tlb_lock, flags);
+	spin_unlock_irqrestore(pgd_spinlock(mm->pgd), flags);
 }

 #define pte_same(A,B)	(pte_val(A) == pte_val(B))
......
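Why PGD_ALLOC_ORDER grows by one: pgd_alloc() allocates 2^PGD_ALLOC_ORDER pages, the page tables occupy the first half, and the per-pagetable spinlock lives in the extra half, which is exactly where pgd_spinlock() points (pgd + (PAGE_SIZE << (PGD_ALLOC_ORDER - 1))). A simplified sketch of the allocation side, hedged; this is not the actual pgd_alloc() and omits the pgd initialisation and error handling it performs:

#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <asm/pgalloc.h>

/* Simplified illustration: allocate the pgd plus the extra pages that the
 * "+ 1" in PGD_ALLOC_ORDER reserves, and park the spinlock there, where
 * pgd_spinlock() expects to find it. */
static pgd_t *demo_pgd_alloc(void)
{
	pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);

	if (pgd)
		spin_lock_init(pgd_spinlock(pgd));
	return pgd;
}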