Commit 29c395c7 authored by Linus Torvalds

Merge tag 'x86-entry-2021-02-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 irq entry updates from Thomas Gleixner:
 "The irq stack switching was moved out of the ASM entry code in course
  of the entry code consolidation. It ended up being suboptimal in
  various ways.

  This reworks the X86 irq stack handling:

   - Make the stack switching inline so the stack pointer manipulation
     is no longer at one easy-to-find place (a sketch of the idea
     follows the list).

   - Get rid of the unnecessary indirect call.

   - Avoid the double stack switching in interrupt return and reuse the
     interrupt stack for softirq handling.

   - An objtool fix for CONFIG_FRAME_POINTER=y builds where objtool got
     confused by the stack pointer manipulation"
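
To make the first two points concrete, here is a simplified sketch of
the inline stack-switch idea. This is an approximation, not the
kernel's exact call_on_irqstack macro from
arch/x86/include/asm/irq_stack.h; the constraints and clobbers are
illustrative, and it assumes the kernel's no-red-zone ABI:

	/*
	 * Sketch: link the old RSP into the top word of the irq stack
	 * (so the unwinder can cross the switch), point RSP at the irq
	 * stack, and invoke the handler with a direct call instead of
	 * the previous indirect CALL_NOSPEC through a function pointer.
	 */
	#define call_on_irqstack(func, tos)				      \
		asm volatile("movq %%rsp, (%[ts])\n\t" /* link old RSP */    \
			     "movq %[ts], %%rsp\n\t"   /* switch stacks */    \
			     "call " #func "\n\t"      /* direct call */      \
			     "popq %%rsp"              /* restore old RSP */  \
			     : : [ts] "r" (tos)				      \
			     : "rax", "rcx", "rdx", "rsi", "rdi", "r8",	      \
			       "r9", "r10", "r11", "cc", "memory")

Because the asm expands at every call site, no single well-known
function manipulates RSP anymore, and the indirect call (plus its
retpoline overhead) disappears.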

* tag 'x86-entry-2021-02-24' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  objtool: Fix stack-swizzle for FRAME_POINTER=y
  um: Enforce the usage of asm-generic/softirq_stack.h
  x86/softirq/64: Inline do_softirq_own_stack()
  softirq: Move do_softirq_own_stack() to generic asm header
  softirq: Move __ARCH_HAS_DO_SOFTIRQ to Kconfig
  x86: Select CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
  x86/softirq: Remove indirection in do_softirq_own_stack()
  x86/entry: Use run_sysvec_on_irqstack_cond() for XEN upcall
  x86/entry: Convert device interrupts to inline stack switching
  x86/entry: Convert system vectors to irq stack macro
  x86/irq: Provide macro for inlining irq stack switching
  x86/apic: Split out spurious handling code
  x86/irq/64: Adjust the per CPU irq stack pointer by 8
  x86/irq: Sanitize irq stack tracking
  x86/entry: Fix instrumentation annotation
parents 4c48faba 724c8a23
Showing 25 additions and 63 deletions
@@ -821,6 +821,12 @@ config HAVE_IRQ_EXIT_ON_IRQ_STACK
 	  This spares a stack switch and improves cache usage on softirq
 	  processing.
 
+config HAVE_SOFTIRQ_ON_OWN_STACK
+	bool
+	help
+	  Architecture provides a function to run __do_softirq() on a
+	  separate stack.
+
 config PGTABLE_LEVELS
 	int
 	default 2
......
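
All of the per-arch hunks below funnel through one new generic header.
A reconstruction of what asm-generic/softirq_stack.h provides, based on
"softirq: Move do_softirq_own_stack() to generic asm header" in the
list above (treat the exact layout as approximate):

	/* SPDX-License-Identifier: GPL-2.0 */
	#ifndef __ASM_GENERIC_SOFTIRQ_STACK_H
	#define __ASM_GENERIC_SOFTIRQ_STACK_H

	#ifdef CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK
	void do_softirq_own_stack(void);
	#else
	static inline void do_softirq_own_stack(void)
	{
		/* No separate stack: run softirqs on the current one. */
		__do_softirq();
	}
	#endif

	#endif

An architecture that can switch stacks selects HAVE_SOFTIRQ_ON_OWN_STACK
and supplies the function; everyone else silently gets the inline
fallback. This replaces the per-arch __ARCH_HAS_DO_SOFTIRQ #defines
being removed below.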
@@ -63,6 +63,7 @@ config PARISC
 	select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
 	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_DYNAMIC_FTRACE_WITH_REGS
+	select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
 	select SET_FS
 	help
......
@@ -12,10 +12,6 @@
 #include <linux/threads.h>
 #include <linux/irq.h>
 
-#ifdef CONFIG_IRQSTACKS
-#define __ARCH_HAS_DO_SOFTIRQ
-#endif
-
 typedef struct {
 	unsigned int __softirq_pending;
 	unsigned int kernel_stack_usage;
......
@@ -17,6 +17,7 @@
 #include <linux/types.h>
 #include <asm/io.h>
+#include <asm/softirq_stack.h>
 #include <asm/smp.h>
 #include <asm/ldcw.h>
......
@@ -235,6 +235,7 @@ config PPC
 	select MMU_GATHER_PAGE_SIZE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE if PPC_BOOK3S_64 && CPU_LITTLE_ENDIAN
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select HAVE_IRQ_TIME_ACCOUNTING
......
@@ -37,8 +37,6 @@ extern int distribute_irqs;
 
 struct pt_regs;
 
-#define __ARCH_HAS_DO_SOFTIRQ
-
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 /*
  * Per-cpu stacks for handling critical, debug and machine check
......
@@ -66,6 +66,7 @@
 #include <asm/livepatch.h>
 #include <asm/asm-prototypes.h>
 #include <asm/hw_irq.h>
+#include <asm/softirq_stack.h>
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
......
@@ -184,6 +184,7 @@ config S390
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE
 	select HAVE_RSEQ
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select HAVE_VIRT_CPU_ACCOUNTING_IDLE
......
@@ -18,7 +18,6 @@
 #define or_softirq_pending(x) (S390_lowcore.softirq_pending |= (x))
 
 #define __ARCH_IRQ_STAT
-#define __ARCH_HAS_DO_SOFTIRQ
 #define __ARCH_IRQ_EXIT_IRQS_DISABLED
 
 static inline void ack_bad_irq(unsigned int irq)
......
@@ -28,6 +28,7 @@
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
 #include <asm/stacktrace.h>
+#include <asm/softirq_stack.h>
 #include "entry.h"
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
......
@@ -54,6 +54,7 @@ config SUPERH
 	select HAVE_PERF_EVENTS
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_UID16
+	select HAVE_SOFTIRQ_ON_OWN_STACK if IRQSTACKS
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_FORCED_THREADING
......
@@ -51,7 +51,6 @@ asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
 #ifdef CONFIG_IRQSTACKS
 extern void irq_ctx_init(int cpu);
 extern void irq_ctx_exit(int cpu);
-# define __ARCH_HAS_DO_SOFTIRQ
 #else
 # define irq_ctx_init(cpu) do { } while (0)
 # define irq_ctx_exit(cpu) do { } while (0)
......
@@ -20,6 +20,7 @@
 #include <linux/uaccess.h>
 #include <asm/thread_info.h>
 #include <cpu/mmu_context.h>
+#include <asm/softirq_stack.h>
 
 atomic_t irq_err_count;
......
@@ -96,6 +96,7 @@ config SPARC64
 	select ARCH_HAS_PTE_SPECIAL
 	select PCI_DOMAINS if PCI
 	select ARCH_HAS_GIGANTIC_PAGE
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 
 config ARCH_PROC_KCORE_TEXT
 	def_bool y
......
@@ -93,7 +93,6 @@ void arch_trigger_cpumask_backtrace(const struct cpumask *mask,
 extern void *hardirq_stack[NR_CPUS];
 extern void *softirq_stack[NR_CPUS];
 
-#define __ARCH_HAS_DO_SOFTIRQ
 
 #define NO_IRQ		0xffffffff
......
@@ -42,6 +42,7 @@
 #include <asm/head.h>
 #include <asm/hypervisor.h>
 #include <asm/cacheflush.h>
+#include <asm/softirq_stack.h>
 
 #include "entry.h"
 #include "cpumap.h"
......
@@ -20,6 +20,7 @@ generic-y += param.h
 generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
+generic-y += softirq_stack.h
 generic-y += switch_to.h
 generic-y += topology.h
 generic-y += trace_clock.h
......
@@ -191,6 +191,7 @@ config X86
 	select HAVE_HW_BREAKPOINT
 	select HAVE_IDE
 	select HAVE_IOREMAP_PROT
+	select HAVE_IRQ_EXIT_ON_IRQ_STACK	if X86_64
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
@@ -223,6 +224,7 @@ config X86
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RELIABLE_STACKTRACE		if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
 	select HAVE_FUNCTION_ARG_ACCESS_API
+	select HAVE_SOFTIRQ_ON_OWN_STACK
 	select HAVE_STACKPROTECTOR		if CC_HAS_SANE_STACKPROTECTOR
 	select HAVE_STACK_VALIDATION		if X86_64
 	select HAVE_STATIC_CALL
......
@@ -249,30 +249,23 @@ static __always_inline bool get_and_clear_inhcall(void) { return false; }
 static __always_inline void restore_inhcall(bool inhcall) { }
 #endif
 
-static void __xen_pv_evtchn_do_upcall(void)
+static void __xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 {
-	irq_enter_rcu();
+	struct pt_regs *old_regs = set_irq_regs(regs);
 
 	inc_irq_stat(irq_hv_callback_count);
 
 	xen_hvm_evtchn_do_upcall();
 
-	irq_exit_rcu();
+	set_irq_regs(old_regs);
 }
 
 __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs;
+	irqentry_state_t state = irqentry_enter(regs);
 	bool inhcall;
-	irqentry_state_t state;
 
-	state = irqentry_enter(regs);
-	old_regs = set_irq_regs(regs);
-
-	instrumentation_begin();
-	run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
-	instrumentation_begin();
-	set_irq_regs(old_regs);
+	run_sysvec_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs);
 
 	inhcall = get_and_clear_inhcall();
 	if (inhcall && !WARN_ON_ONCE(state.exit_rcu)) {
......
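
The conversion above works because run_sysvec_on_irqstack_cond() now
carries the irq_enter_rcu()/irq_exit_rcu() bookkeeping as well as the
conditional stack switch. A plain-C sketch of its semantics (the real
thing is an inline-asm macro in arch/x86/include/asm/irq_stack.h; the
two helpers below are illustrative stand-ins, not kernel API):

	/*
	 * Stand-ins for illustration only. The second is assumed to run
	 * irq_enter_rcu(), func(regs) and irq_exit_rcu() as one sequence
	 * on the per-CPU irq stack, via an inline-asm switch like the
	 * sketch near the top of this page.
	 */
	extern bool on_irqstack_or_user_mode(struct pt_regs *regs);
	extern void switch_and_call_on_irqstack(void (*func)(struct pt_regs *),
						struct pt_regs *regs);

	static inline void
	run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *),
				    struct pt_regs *regs)
	{
		if (on_irqstack_or_user_mode(regs)) {
			/* Nested, or entered from user mode: no switch. */
			irq_enter_rcu();
			func(regs);
			irq_exit_rcu();
		} else {
			/*
			 * The handler and irq_exit_rcu() both run on the
			 * irq stack, so softirq processing reuses it
			 * rather than switching a second time on the way
			 * out (this is what HAVE_IRQ_EXIT_ON_IRQ_STACK
			 * advertises).
			 */
			switch_and_call_on_irqstack(func, regs);
		}
	}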
@@ -754,47 +754,6 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
 SYM_CODE_END(.Lbad_gs)
 	.previous
 
-/*
- * rdi: New stack pointer points to the top word of the stack
- * rsi: Function pointer
- * rdx: Function argument (can be NULL if none)
- */
-SYM_FUNC_START(asm_call_on_stack)
-SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL)
-SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL)
-	/*
-	 * Save the frame pointer unconditionally. This allows the ORC
-	 * unwinder to handle the stack switch.
-	 */
-	pushq	%rbp
-	mov	%rsp, %rbp
-
-	/*
-	 * The unwinder relies on the word at the top of the new stack
-	 * page linking back to the previous RSP.
-	 */
-	mov	%rsp, (%rdi)
-	mov	%rdi, %rsp
-	/* Move the argument to the right place */
-	mov	%rdx, %rdi
-
-1:
-	.pushsection .discard.instr_begin
-	.long 1b - .
-	.popsection
-
-	CALL_NOSPEC	rsi
-
-2:
-	.pushsection .discard.instr_end
-	.long 2b - .
-	.popsection
-
-	/* Restore the previous stack pointer from RBP. */
-	leaveq
-	ret
-SYM_FUNC_END(asm_call_on_stack)
-
 #ifdef CONFIG_XEN_PV
 /*
  * A note on the "critical region" in our callback handler.
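
With the ASM helper gone, the softirq side is inlined as well
("x86/softirq/64: Inline do_softirq_own_stack()"). A hedged
approximation of the resulting x86-64 macro, reusing the
call_on_irqstack() sketch from the top of this page; hardirq_stack_inuse
and hardirq_stack_ptr are the per-CPU tracking variables reworked by
"x86/irq: Sanitize irq stack tracking", though the exact expansion here
is approximate:

	#define do_softirq_own_stack()					\
	{								\
		/* Mark the irq stack busy so a device interrupt */	\
		/* arriving in here does not try to switch again. */	\
		__this_cpu_write(hardirq_stack_inuse, true);		\
		call_on_irqstack(__do_softirq,				\
				 __this_cpu_read(hardirq_stack_ptr));	\
		__this_cpu_write(hardirq_stack_inuse, false);		\
	}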