Commit dbb381b6 authored by Linus Torvalds

Merge tag 'timers-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull timekeeping and timer updates from Thomas Gleixner:
 "Core:

   - Consolidation of the vDSO build infrastructure to address the
     difficulties of cross-builds for ARM64 compat vDSO libraries by
     restricting the exposure of header content to the vDSO build.

     This is achieved by splitting out header content into separate
     headers which contain only the minimal information required to
     build the vDSO. These new headers are included from the kernel
     headers and the vDSO-specific files. (A sketch of the resulting
     layout precedes the diffs below.)

   - Enhancements to the generic vDSO library allowing more fine-grained
     control over the compiled-in code, further reducing
     architecture-specific storage and preparing for the adoption of the
     generic library by PPC.

   - Cleanup and consolidation of the exit related code in posix CPU
     timers.

   - Small cleanups and enhancements here and there

  Drivers:

   - The obligatory new drivers: Ingenic JZ47xx and X1000 TCU support

   - Correct the clock rate of PIT64b global clock

   - setup_irq() cleanup

   - Preparation for PWM and suspend support for the TI DM timer

   - Expand the fttmr010 driver to support ast2600 systems

   - The usual small fixes, enhancements and cleanups all over the
     place"

* tag 'timers-core-2020-03-30' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (80 commits)
  Revert "clocksource/drivers/timer-probe: Avoid creating dead devices"
  vdso: Fix clocksource.h macro detection
  um: Fix header inclusion
  arm64: vdso32: Enable Clang Compilation
  lib/vdso: Enable common headers
  arm: vdso: Enable arm to use common headers
  x86/vdso: Enable x86 to use common headers
  mips: vdso: Enable mips to use common headers
  arm64: vdso32: Include common headers in the vdso library
  arm64: vdso: Include common headers in the vdso library
  arm64: Introduce asm/vdso/processor.h
  arm64: vdso32: Code clean up
  linux/elfnote.h: Replace elf.h with UAPI equivalent
  scripts: Fix the inclusion order in modpost
  common: Introduce processor.h
  linux/ktime.h: Extract common header for vDSO
  linux/jiffies.h: Extract common header for vDSO
  linux/time64.h: Extract common header for vDSO
  linux/time32.h: Extract common header for vDSO
  linux/time.h: Extract common header for vDSO
  ...
parents 336622e9 4479730e
Showing with 131 additions and 116 deletions
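
The vDSO header consolidation described in the core section above follows one pattern, visible in the arm and arm64 hunks below: the content the vDSO needs moves into a slim asm/vdso/ header that the vDSO build can include safely, and the original kernel header pulls that subset back in. A condensed sketch of the resulting layout, using the cpu_relax() case from this very diff (not the verbatim files):

/* asm/vdso/processor.h -- only what the vDSO may see, no kernel-only headers */
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H

#ifndef __ASSEMBLY__
static inline void cpu_relax(void)
{
        asm volatile("yield" ::: "memory");
}
#endif /* __ASSEMBLY__ */

#endif /* __ASM_VDSO_PROCESSOR_H */

/* asm/processor.h -- the kernel header regains the helper via the generic wrapper */
#include <vdso/processor.h>

The x86, mips and um commits in the shortlog above apply the same split to their headers.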
@@ -11,6 +11,7 @@ Required properties:
   "moxa,moxart-timer", "faraday,fttmr010"
   "aspeed,ast2400-timer"
   "aspeed,ast2500-timer"
+  "aspeed,ast2600-timer"
 - reg : Should contain registers location and length
 - interrupts : Should contain the three timer interrupts usually with

@@ -10,6 +10,7 @@ Required properties:
   * ingenic,jz4740-tcu
   * ingenic,jz4725b-tcu
   * ingenic,jz4770-tcu
+  * ingenic,x1000-tcu
   followed by "simple-mfd".
 - reg: Should be the offset/length value corresponding to the TCU registers
 - clocks: List of phandle & clock specifiers for clocks external to the TCU.

@@ -3,7 +3,6 @@ config ARM
         bool
         default y
         select ARCH_32BIT_OFF_T
-        select ARCH_CLOCKSOURCE_DATA
         select ARCH_HAS_BINFMT_FLAT
         select ARCH_HAS_DEBUG_VIRTUAL if MMU
         select ARCH_HAS_DEVMEM_IS_ALLOWED

+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_CLOCKSOURCE_H
 #define _ASM_CLOCKSOURCE_H
-struct arch_clocksource_data {
-        bool vdso_direct;       /* Usable for direct VDSO access? */
-};
+#include <asm/vdso/clocksource.h>
-#endif
+#endif /* _ASM_CLOCKSOURCE_H */

@@ -50,25 +50,7 @@
 #ifdef CONFIG_CPU_CP15
-#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
-        "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
-#define __ACCESS_CP15_64(Op1, CRm) \
-        "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
-#define __read_sysreg(r, w, c, t) ({ \
-        t __val; \
-        asm volatile(r " " c : "=r" (__val)); \
-        __val; \
-})
-#define read_sysreg(...)        __read_sysreg(__VA_ARGS__)
-#define __write_sysreg(v, r, w, c, t)   asm volatile(w " " c : : "r" ((t)(v)))
-#define write_sysreg(v, ...)    __write_sysreg(v, __VA_ARGS__)
-#define BPIALL          __ACCESS_CP15(c7, 0, c5, 6)
-#define ICIALLU         __ACCESS_CP15(c7, 0, c5, 0)
-#define CNTVCT          __ACCESS_CP15_64(1, c14)
+#include <asm/vdso/cp15.h>
 extern unsigned long cr_alignment;      /* defined in entry-armv.S */

@@ -14,6 +14,7 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 #include <asm/unified.h>
+#include <asm/vdso/processor.h>
 #ifdef __KERNEL__
 #define STACK_TOP       ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -85,16 +86,6 @@ extern void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
-#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
-#define cpu_relax() \
-        do { \
-                smp_mb(); \
-                __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
-        } while (0)
-#else
-#define cpu_relax()     barrier()
-#endif
 #define task_pt_regs(p) \
         ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSOCLOCKSOURCE_H
#define __ASM_VDSOCLOCKSOURCE_H
#define VDSO_ARCH_CLOCKMODES \
VDSO_CLOCKMODE_ARCHTIMER
#endif /* __ASM_VDSOCLOCKSOURCE_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 ARM Ltd.
*/
#ifndef __ASM_VDSO_CP15_H
#define __ASM_VDSO_CP15_H
#ifndef __ASSEMBLY__
#ifdef CONFIG_CPU_CP15
#include <linux/stringify.h>
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
#define __ACCESS_CP15_64(Op1, CRm) \
"mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
#define __read_sysreg(r, w, c, t) ({ \
t __val; \
asm volatile(r " " c : "=r" (__val)); \
__val; \
})
#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
#define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
#define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
#define CNTVCT __ACCESS_CP15_64(1, c14)
#endif /* CONFIG_CPU_CP15 */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_VDSO_CP15_H */
@@ -7,9 +7,9 @@
 #ifndef __ASSEMBLY__
-#include <asm/barrier.h>
-#include <asm/cp15.h>
+#include <asm/errno.h>
 #include <asm/unistd.h>
+#include <asm/vdso/cp15.h>
 #include <uapi/linux/time.h>
 #define VDSO_HAS_CLOCK_GETRES           1
@@ -106,20 +106,32 @@ static __always_inline int clock_getres32_fallback(
         return ret;
 }
+static inline bool arm_vdso_hres_capable(void)
+{
+        return IS_ENABLED(CONFIG_ARM_ARCH_TIMER);
+}
+#define __arch_vdso_hres_capable arm_vdso_hres_capable
 static __always_inline u64 __arch_get_hw_counter(int clock_mode)
 {
 #ifdef CONFIG_ARM_ARCH_TIMER
         u64 cycle_now;
-        if (!clock_mode)
-                return -EINVAL;
+        /*
+         * Core checks for mode already, so this raced against a concurrent
+         * update. Return something. Core will do another round and then
+         * see the mode change and fallback to the syscall.
+         */
+        if (clock_mode == VDSO_CLOCKMODE_NONE)
+                return 0;
         isb();
         cycle_now = read_sysreg(CNTVCT);
         return cycle_now;
 #else
-        return -EINVAL; /* use fallback */
+        /* Make GCC happy. This is compiled out anyway */
+        return 0;
 #endif
 }

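The replacement comment above leans on the generic library's read loop: the caller checks the clock mode under a sequence count, so a bogus counter value returned during a concurrent mode change is simply discarded on retry. A stand-alone model of that convention (illustrative only, not the lib/vdso code):

#include <stdint.h>

/* Mirrors the modes named in the new asm/vdso/clocksource.h headers. */
enum vdso_clock_mode_model { VDSO_CLOCKMODE_NONE = 0, VDSO_CLOCKMODE_ARCHTIMER = 1 };

struct vdso_data_model {
        volatile uint32_t seq;                          /* odd while an update is in flight */
        volatile enum vdso_clock_mode_model clock_mode;
};

static uint64_t get_hw_counter_model(enum vdso_clock_mode_model mode)
{
        if (mode == VDSO_CLOCKMODE_NONE)
                return 0;       /* raced with an update: any value will do */
        return 42;              /* stand-in for reading the hardware counter */
}

/* Returns 0 with *cycles filled in, or -1 to make the caller take the syscall. */
static int do_hres_model(const struct vdso_data_model *vd, uint64_t *cycles)
{
        uint32_t seq;
        uint64_t now;

        do {
                seq = vd->seq;                          /* begin snapshot */
                if (vd->clock_mode == VDSO_CLOCKMODE_NONE)
                        return -1;                      /* vDSO path disabled: fall back */
                now = get_hw_counter_model(vd->clock_mode);
        } while ((seq & 1) || seq != vd->seq);          /* an update raced: retry */

        *cycles = now;          /* the real code scales cycles into nanoseconds here */
        return 0;
}

This is also why the arm64 headers further down can drop the __VDSO_USE_SYSCALL sentinel: once the core checks the mode, no magic return value is needed.
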
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 ARM Ltd.
*/
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
#ifndef __ASSEMBLY__
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax() \
do { \
smp_mb(); \
__asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
} while (0)
#else
#define cpu_relax() barrier()
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ASM_VDSO_PROCESSOR_H */
@@ -11,18 +11,6 @@
 extern struct vdso_data *vdso_data;
 extern bool cntvct_ok;
-static __always_inline
-bool tk_is_cntvct(const struct timekeeper *tk)
-{
-        if (!IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
-                return false;
-        if (!tk->tkr_mono.clock->archdata.vdso_direct)
-                return false;
-        return true;
-}
 /*
  * Update the vDSO data page to keep in sync with kernel timekeeping.
  */
@@ -33,29 +21,6 @@ struct vdso_data *__arm_get_k_vdso_data(void)
 }
 #define __arch_get_k_vdso_data __arm_get_k_vdso_data
-static __always_inline
-bool __arm_update_vdso_data(void)
-{
-        return cntvct_ok;
-}
-#define __arch_update_vdso_data __arm_update_vdso_data
-static __always_inline
-int __arm_get_clock_mode(struct timekeeper *tk)
-{
-        u32 __tk_is_cntvct = tk_is_cntvct(tk);
-        return __tk_is_cntvct;
-}
-#define __arch_get_clock_mode __arm_get_clock_mode
-static __always_inline
-int __arm_use_vsyscall(struct vdso_data *vdata)
-{
-        return vdata[CS_HRES_COARSE].clock_mode;
-}
-#define __arch_use_vsyscall __arm_use_vsyscall
 static __always_inline
 void __arm_sync_vdso_data(struct vdso_data *vdata)
 {

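With tk_is_cntvct() and the __arch_get_clock_mode()/__arch_use_vsyscall() hooks gone, deciding whether the vDSO may read the counter directly no longer lives in per-architecture code: in the reworked scheme from the clocksource commits elsewhere in this series, the clocksource driver advertises the mode and the generic vsyscall code publishes it in the vDSO data page. Roughly, with the driver and its read callback hypothetical and the field name taken as an assumption from that rework rather than from this hunk:

#include <linux/clocksource.h>

/* Sketch only -- not a hunk from this merge. */
static u64 my_timer_read(struct clocksource *cs)
{
        return read_my_hardware_counter();      /* hypothetical hardware read */
}

static struct clocksource my_timer_clocksource = {
        .name            = "my_timer",          /* hypothetical driver */
        .rating          = 400,
        .read            = my_timer_read,
        .mask            = CLOCKSOURCE_MASK(56),
        .flags           = CLOCK_SOURCE_IS_CONTINUOUS,
        .vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER,   /* assumed field per the clocksource rework */
};

The arm64 vsyscall.h hunk further down drops its __arm64_get_clock_mode() copy of the same logic for the same reason.
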
@@ -9,7 +9,6 @@ config ARM64
         select ACPI_MCFG if (ACPI && PCI)
         select ACPI_SPCR_TABLE if ACPI
         select ACPI_PPTT if ACPI
-        select ARCH_CLOCKSOURCE_DATA
         select ARCH_HAS_DEBUG_VIRTUAL
         select ARCH_HAS_DEVMEM_IS_ALLOWED
         select ARCH_HAS_DMA_PREP_COHERENT

@@ -2,8 +2,6 @@
 #ifndef _ASM_CLOCKSOURCE_H
 #define _ASM_CLOCKSOURCE_H
-struct arch_clocksource_data {
-        bool vdso_direct;       /* Usable for direct VDSO access? */
-};
+#include <asm/vdso/clocksource.h>
 #endif

@@ -28,6 +28,8 @@
 #include <linux/string.h>
 #include <linux/thread_info.h>
+#include <vdso/processor.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
 #include <asm/hw_breakpoint.h>
@@ -256,11 +258,6 @@ extern void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
-static inline void cpu_relax(void)
-{
-        asm volatile("yield" ::: "memory");
-}
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                          struct task_struct *next);

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSOCLOCKSOURCE_H
#define __ASM_VDSOCLOCKSOURCE_H
#define VDSO_ARCH_CLOCKMODES \
VDSO_CLOCKMODE_ARCHTIMER
#endif
@@ -8,12 +8,10 @@
 #ifndef __ASSEMBLY__
 #include <asm/unistd.h>
-#include <uapi/linux/time.h>
+#include <asm/errno.h>
 #include <asm/vdso/compat_barrier.h>
-#define __VDSO_USE_SYSCALL              ULLONG_MAX
 #define VDSO_HAS_CLOCK_GETRES           1
 #define BUILD_VDSO32                    1
@@ -78,10 +76,6 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
         register long ret asm ("r0");
         register long nr asm("r7") = __NR_compat_clock_getres_time64;
-        /* The checks below are required for ABI consistency with arm */
-        if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
-                return -EINVAL;
         asm volatile(
         "       swi #0\n"
         : "=r" (ret)
@@ -99,10 +93,6 @@ int clock_getres32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
         register long ret asm ("r0");
         register long nr asm("r7") = __NR_compat_clock_getres;
-        /* The checks below are required for ABI consistency with arm */
-        if ((_clkid >= MAX_CLOCKS) && (_ts == NULL))
-                return -EINVAL;
         asm volatile(
         "       swi #0\n"
         : "=r" (ret)
@@ -117,11 +107,12 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
         u64 res;
         /*
-         * clock_mode == 0 implies that vDSO are enabled otherwise
-         * fallback on syscall.
+         * Core checks for mode already, so this raced against a concurrent
+         * update. Return something. Core will do another round and then
+         * see the mode change and fallback to the syscall.
         */
-        if (clock_mode)
-                return __VDSO_USE_SYSCALL;
+        if (clock_mode == VDSO_CLOCKMODE_NONE)
+                return 0;
         /*
         * This isb() is required to prevent that the counter value

@@ -8,9 +8,6 @@
 #ifndef __ASSEMBLY__
 #include <asm/unistd.h>
-#include <uapi/linux/time.h>
-#define __VDSO_USE_SYSCALL              ULLONG_MAX
 #define VDSO_HAS_CLOCK_GETRES           1
@@ -71,11 +68,12 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
         u64 res;
         /*
-         * clock_mode == 0 implies that vDSO are enabled otherwise
-         * fallback on syscall.
+         * Core checks for mode already, so this raced against a concurrent
+         * update. Return something. Core will do another round and then
+         * see the mode change and fallback to the syscall.
         */
-        if (clock_mode)
-                return __VDSO_USE_SYSCALL;
+        if (clock_mode == VDSO_CLOCKMODE_NONE)
+                return 0;
         /*
         * This isb() is required to prevent that the counter value

/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2020 ARM Ltd.
*/
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
#ifndef __ASSEMBLY__
static inline void cpu_relax(void)
{
asm volatile("yield" ::: "memory");
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_VDSO_PROCESSOR_H */
@@ -21,15 +21,6 @@ struct vdso_data *__arm64_get_k_vdso_data(void)
 }
 #define __arch_get_k_vdso_data __arm64_get_k_vdso_data
-static __always_inline
-int __arm64_get_clock_mode(struct timekeeper *tk)
-{
-        u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct;
-        return use_syscall;
-}
-#define __arch_get_clock_mode __arm64_get_clock_mode
 static __always_inline
 void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk)
 {

@@ -5,8 +5,6 @@
  * Copyright (C) 2018 ARM Limited
  *
  */
-#include <linux/time.h>
-#include <linux/types.h>
 int __kernel_clock_gettime(clockid_t clock,
                            struct __kernel_timespec *ts)
