Commit e98e03d0 authored by Linus Torvalds
Pull more s390 updates from Vasily Gorbik:

 - Fix preempt_count initialization.

 - Rework call_on_stack() macro to add proper type handling and avoid
   possible register corruption.

 - Remove more error-prone "register asm" constructs, plus related fixes
   (a before/after sketch of the conversion pattern follows this list).

 - Fix syscall restarting when multiple signals are coming in. This adds
   minimalistic trampolines to the vdso so we can return from a signal
   without using the stack, which would require pgm check handler hacks
   when NX is enabled.

 - Remove HAVE_IRQ_EXIT_ON_IRQ_STACK, since this no longer holds after
   the switch to generic entry.

 - Fix protected virtualization secure storage access exception
   handling.

 - Make machine check C handler always enter with DAT enabled and move
   register validation to C code.

 - Fix tinyconfig boot problem by avoiding MONITOR CALL without
   CONFIG_BUG.

 - Increase asm symbols alignment to 16 to make it consistent with
   compilers.

 - Enable concurrent access to the CPU Measurement Counter Facility.

 - Add support for a dynamic AP bus size limit and rework ap_dqap() to
   deal with messages larger than the receive buffer.
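
A before/after sketch of the "register asm" conversion pattern referenced
above, simplified from the ap_rapq() change in the diff below (not the
literal kernel code):

	/* Before: "register asm" variables pin values to hard registers, but
	 * the compiler may clobber them between initialization and the asm
	 * statement, e.g. when it schedules an unrelated function call. */
	register unsigned long reg0 asm("0") = qid | (1UL << 24);
	register struct ap_queue_status reg1 asm("1");
	asm volatile(".long 0xb2af0000" : "=d" (reg1) : "d" (reg0) : "cc");

	/* After: ordinary variables; the asm itself loads the fixed
	 * registers and names them in the clobber list. */
	unsigned long reg0 = qid | (1UL << 24);
	struct ap_queue_status reg1;
	asm volatile(
		"	lgr	0,%[reg0]\n"	/* fc + qid into gr0 */
		"	.long	0xb2af0000\n"	/* PQAP(RAPQ) */
		"	lgr	%[reg1],1\n"	/* status from gr1 */
		: [reg1] "=&d" (reg1)
		: [reg0] "d" (reg0)
		: "cc", "0", "1");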

* tag 's390-5.14-2' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (41 commits)
  s390: preempt: Fix preempt_count initialization
  s390/linkage: increase asm symbols alignment to 16
  s390: rename CALL_ON_STACK_NORETURN() to call_on_stack_noreturn()
  s390: add type checking to CALL_ON_STACK_NORETURN() macro
  s390: remove old CALL_ON_STACK() macro
  s390/softirq: use call_on_stack() macro
  s390/lib: use call_on_stack() macro
  s390/smp: use call_on_stack() macro
  s390/kexec: use call_on_stack() macro
  s390/irq: use call_on_stack() macro
  s390/mm: use call_on_stack() macro
  s390: introduce proper type handling call_on_stack() macro
  s390/irq: simplify on_async_stack()
  s390/irq: inline do_softirq_own_stack()
  s390/irq: simplify do_softirq_own_stack()
  s390/ap: get rid of register asm in ap_dqap()
  s390: rename PIF_SYSCALL_RESTART to PIF_EXECVE_PGSTE_RESTART
  s390: move restart of execve() syscall
  s390/signal: remove sigreturn on stack
  s390/signal: switch to using vdso for sigreturn and syscall restart
  ...
parents 379cf80a 6a942f57
Showing changes with 334 additions and 172 deletions
@@ -163,7 +163,6 @@ config S390
 	select HAVE_GCC_PLUGINS
 	select HAVE_GENERIC_VDSO
 	select HAVE_IOREMAP_PROT if PCI
-	select HAVE_IRQ_EXIT_ON_IRQ_STACK
 	select HAVE_KERNEL_BZIP2
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZ4
@@ -438,6 +437,7 @@ config COMPAT
 	select COMPAT_OLD_SIGACTION
 	select HAVE_UID16
 	depends on MULTIUSER
+	depends on !CC_IS_CLANG
 	help
 	  Select this option if you want to enable your system kernel to
 	  handle system-calls from ELF binaries for 31 bit ESA. This option
...
@@ -166,6 +166,19 @@ archheaders:
 archprepare:
 	$(Q)$(MAKE) $(build)=$(syscalls) kapi
 	$(Q)$(MAKE) $(build)=$(tools) kapi
+ifeq ($(KBUILD_EXTMOD),)
+# We need to generate vdso-offsets.h before compiling certain files in kernel/.
+# In order to do that, we should use the archprepare target, but we can't since
+# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
+# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
+# Therefore we need to generate the header after prepare0 has been made, hence
+# this hack.
+prepare: vdso_prepare
+vdso_prepare: prepare0
+	$(Q)$(MAKE) $(build)=arch/s390/kernel/vdso64 include/generated/vdso64-offsets.h
+	$(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
+		$(build)=arch/s390/kernel/vdso32 include/generated/vdso32-offsets.h)
+endif
 
 # Don't use tabs in echo arguments
 define archhelp
...
@@ -23,6 +23,7 @@ unsigned long __bootdata_preserved(vmemmap_size);
 unsigned long __bootdata_preserved(MODULES_VADDR);
 unsigned long __bootdata_preserved(MODULES_END);
 unsigned long __bootdata(ident_map_size);
+int __bootdata(is_full_image) = 1;
 
 u64 __bootdata_preserved(stfle_fac_list[16]);
 u64 __bootdata_preserved(alt_stfle_fac_list[16]);
...
@@ -36,6 +36,7 @@ void uv_query_info(void)
 		uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
 		uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
 		uv_info.max_guest_cpu_id = uvcb.max_guest_cpu_id;
+		uv_info.uv_feature_indications = uvcb.uv_feature_indications;
 	}
 
 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
...
@@ -53,18 +53,20 @@ struct ap_queue_status {
  */
 static inline bool ap_instructions_available(void)
 {
-	register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
-	register unsigned long reg1 asm ("1") = 0;
-	register unsigned long reg2 asm ("2") = 0;
+	unsigned long reg0 = AP_MKQID(0, 0);
+	unsigned long reg1 = 0;
 
 	asm volatile(
+		"	lgr	0,%[reg0]\n"		/* qid into gr0 */
+		"	lghi	1,0\n"			/* 0 into gr1 */
+		"	lghi	2,0\n"			/* 0 into gr2 */
 		"	.long	0xb2af0000\n"		/* PQAP(TAPQ) */
-		"0:	la	%0,1\n"
+		"0:	la	%[reg1],1\n"		/* 1 into reg1 */
 		"1:\n"
 		EX_TABLE(0b, 1b)
-		: "+d" (reg1), "+d" (reg2)
-		: "d" (reg0)
-		: "cc");
+		: [reg1] "+&d" (reg1)
+		: [reg0] "d" (reg0)
+		: "cc", "0", "1", "2");
 	return reg1 != 0;
 }
@@ -77,14 +79,18 @@ static inline bool ap_instructions_available(void)
  */
 static inline struct ap_queue_status ap_tapq(ap_qid_t qid, unsigned long *info)
 {
-	register unsigned long reg0 asm ("0") = qid;
-	register struct ap_queue_status reg1 asm ("1");
-	register unsigned long reg2 asm ("2");
+	struct ap_queue_status reg1;
+	unsigned long reg2;
 
-	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
-		     : "=d" (reg1), "=d" (reg2)
-		     : "d" (reg0)
-		     : "cc");
+	asm volatile(
+		"	lgr	0,%[qid]\n"		/* qid into gr0 */
+		"	lghi	2,0\n"			/* 0 into gr2 */
+		"	.long	0xb2af0000\n"		/* PQAP(TAPQ) */
+		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		"	lgr	%[reg2],2\n"		/* gr2 into reg2 */
+		: [reg1] "=&d" (reg1), [reg2] "=&d" (reg2)
+		: [qid] "d" (qid)
+		: "cc", "0", "1", "2");
 	if (info)
 		*info = reg2;
 	return reg1;
@@ -115,14 +121,16 @@ static inline struct ap_queue_status ap_test_queue(ap_qid_t qid,
  */
 static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
 {
-	register unsigned long reg0 asm ("0") = qid | (1UL << 24);
-	register struct ap_queue_status reg1 asm ("1");
+	unsigned long reg0 = qid | (1UL << 24);	/* fc 1UL is RAPQ */
+	struct ap_queue_status reg1;
 
 	asm volatile(
-		".long 0xb2af0000"		/* PQAP(RAPQ) */
-		: "=d" (reg1)
-		: "d" (reg0)
-		: "cc");
+		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
+		"	.long	0xb2af0000\n"		/* PQAP(RAPQ) */
+		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		: [reg1] "=&d" (reg1)
+		: [reg0] "d" (reg0)
+		: "cc", "0", "1");
 	return reg1;
 }
@@ -134,14 +142,16 @@ static inline struct ap_queue_status ap_rapq(ap_qid_t qid)
  */
 static inline struct ap_queue_status ap_zapq(ap_qid_t qid)
 {
-	register unsigned long reg0 asm ("0") = qid | (2UL << 24);
-	register struct ap_queue_status reg1 asm ("1");
+	unsigned long reg0 = qid | (2UL << 24);	/* fc 2UL is ZAPQ */
+	struct ap_queue_status reg1;
 
 	asm volatile(
-		".long 0xb2af0000"		/* PQAP(ZAPQ) */
-		: "=d" (reg1)
-		: "d" (reg0)
-		: "cc");
+		"	lgr	0,%[reg0]\n"		/* qid arg into gr0 */
+		"	.long	0xb2af0000\n"		/* PQAP(ZAPQ) */
+		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		: [reg1] "=&d" (reg1)
+		: [reg0] "d" (reg0)
+		: "cc", "0", "1");
 	return reg1;
 }
@@ -172,18 +182,20 @@ struct ap_config_info {
  */
 static inline int ap_qci(struct ap_config_info *config)
 {
-	register unsigned long reg0 asm ("0") = 4UL << 24;
-	register unsigned long reg1 asm ("1") = -EOPNOTSUPP;
-	register struct ap_config_info *reg2 asm ("2") = config;
+	unsigned long reg0 = 4UL << 24;	/* fc 4UL is QCI */
+	unsigned long reg1 = -EOPNOTSUPP;
+	struct ap_config_info *reg2 = config;
 
 	asm volatile(
+		"	lgr	0,%[reg0]\n"		/* QCI fc into gr0 */
+		"	lgr	2,%[reg2]\n"		/* ptr to config into gr2 */
 		"	.long	0xb2af0000\n"		/* PQAP(QCI) */
-		"0:	la	%0,0\n"
+		"0:	la	%[reg1],0\n"		/* good case, QCI fc available */
		"1:\n"
		EX_TABLE(0b, 1b)
-		: "+d" (reg1)
-		: "d" (reg0), "d" (reg2)
-		: "cc", "memory");
+		: [reg1] "+&d" (reg1)
+		: [reg0] "d" (reg0), [reg2] "d" (reg2)
+		: "cc", "memory", "0", "2");
 	return reg1;
 }
@@ -220,21 +232,25 @@ static inline struct ap_queue_status ap_aqic(ap_qid_t qid,
 					     struct ap_qirq_ctrl qirqctrl,
 					     void *ind)
 {
-	register unsigned long reg0 asm ("0") = qid | (3UL << 24);
-	register union {
+	unsigned long reg0 = qid | (3UL << 24);	/* fc 3UL is AQIC */
+	union {
 		unsigned long value;
 		struct ap_qirq_ctrl qirqctrl;
 		struct ap_queue_status status;
-	} reg1 asm ("1");
-	register void *reg2 asm ("2") = ind;
+	} reg1;
+	void *reg2 = ind;
 
 	reg1.qirqctrl = qirqctrl;
 	asm volatile(
-		".long 0xb2af0000"		/* PQAP(AQIC) */
-		: "+d" (reg1)
-		: "d" (reg0), "d" (reg2)
-		: "cc");
+		"	lgr	0,%[reg0]\n"		/* qid param into gr0 */
+		"	lgr	1,%[reg1]\n"		/* irq ctrl into gr1 */
+		"	lgr	2,%[reg2]\n"		/* ni addr into gr2 */
+		"	.long	0xb2af0000\n"		/* PQAP(AQIC) */
+		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		: [reg1] "+&d" (reg1)
+		: [reg0] "d" (reg0), [reg2] "d" (reg2)
+		: "cc", "0", "1", "2");
 	return reg1.status;
 }
@@ -268,21 +284,24 @@ union ap_qact_ap_info {
 static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit,
 					     union ap_qact_ap_info *apinfo)
 {
-	register unsigned long reg0 asm ("0") = qid | (5UL << 24)
-					      | ((ifbit & 0x01) << 22);
-	register union {
+	unsigned long reg0 = qid | (5UL << 24) | ((ifbit & 0x01) << 22);
+	union {
 		unsigned long value;
 		struct ap_queue_status status;
-	} reg1 asm ("1");
-	register unsigned long reg2 asm ("2");
+	} reg1;
+	unsigned long reg2;
 
 	reg1.value = apinfo->val;
 	asm volatile(
-		".long 0xb2af0000"		/* PQAP(QACT) */
-		: "+d" (reg1), "=d" (reg2)
-		: "d" (reg0)
-		: "cc");
+		"	lgr	0,%[reg0]\n"		/* qid param into gr0 */
+		"	lgr	1,%[reg1]\n"		/* qact in info into gr1 */
+		"	.long	0xb2af0000\n"		/* PQAP(QACT) */
+		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		"	lgr	%[reg2],2\n"		/* qact out info into reg2 */
+		: [reg1] "+&d" (reg1), [reg2] "=&d" (reg2)
+		: [reg0] "d" (reg0)
+		: "cc", "0", "1", "2");
 	apinfo->val = reg2;
 	return reg1.status;
 }
@@ -303,19 +322,24 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
 					     unsigned long long psmid,
 					     void *msg, size_t length)
 {
-	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
-	register struct ap_queue_status reg1 asm ("1");
-	register unsigned long reg2 asm ("2") = (unsigned long) msg;
-	register unsigned long reg3 asm ("3") = (unsigned long) length;
-	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
-	register unsigned long reg5 asm ("5") = psmid & 0xffffffff;
+	unsigned long reg0 = qid | 0x40000000UL;	/* 0x4... is last msg part */
+	union register_pair nqap_r1, nqap_r2;
+	struct ap_queue_status reg1;
+
+	nqap_r1.even = (unsigned int)(psmid >> 32);
+	nqap_r1.odd  = psmid & 0xffffffff;
+	nqap_r2.even = (unsigned long)msg;
+	nqap_r2.odd  = (unsigned long)length;
 
 	asm volatile (
-		"0: .long 0xb2ad0042\n"		/* NQAP */
-		"   brc   2,0b"
-		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
-		: "d" (reg4), "d" (reg5)
-		: "cc", "memory");
+		"	lgr	0,%[reg0]\n"	/* qid param in gr0 */
+		"0:	.insn	rre,0xb2ad0000,%[nqap_r1],%[nqap_r2]\n"
+		"	brc	2,0b\n"		/* handle partial completion */
+		"	lgr	%[reg1],1\n"	/* gr1 (status) into reg1 */
+		: [reg0] "+&d" (reg0), [reg1] "=&d" (reg1),
+		  [nqap_r2] "+&d" (nqap_r2.pair)
+		: [nqap_r1] "d" (nqap_r1.pair)
+		: "cc", "memory", "0", "1");
 	return reg1;
 }
@@ -325,6 +349,8 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
  * @psmid: Pointer to program supplied message identifier
  * @msg: The message text
  * @length: The message length
+ * @reslength: Residual length on return
+ * @resgr0: input: gr0 value (only used if != 0), output: residual gr0 content
  *
  * Returns AP queue status structure.
  * Condition code 1 on DQAP means the receive has taken place
@@ -336,27 +362,65 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
  * Note that gpr2 is used by the DQAP instruction to keep track of
  * any 'residual' length, in case the instruction gets interrupted.
  * Hence it gets zeroed before the instruction.
+ * If the message does not fit into the buffer, this function will
+ * return with a truncated message and the reply in the firmware queue
+ * is not removed. This is indicated to the caller with an
+ * ap_queue_status response_code value of all bits on (0xFF) and (if
+ * the reslength ptr is given) the remaining length is stored in
+ * *reslength and (if the resgr0 ptr is given) the updated gr0 value
+ * for further processing of this msg entry is stored in *resgr0. The
+ * caller needs to detect this situation and should invoke ap_dqap
+ * with a valid resgr0 ptr and a value in there != 0 to indicate that
+ * *resgr0 is to be used instead of qid to further process this entry.
  */
 static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
 					     unsigned long long *psmid,
-					     void *msg, size_t length)
+					     void *msg, size_t length,
+					     size_t *reslength,
+					     unsigned long *resgr0)
 {
-	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
-	register struct ap_queue_status reg1 asm ("1");
-	register unsigned long reg2 asm("2") = 0UL;
-	register unsigned long reg4 asm("4") = (unsigned long) msg;
-	register unsigned long reg5 asm("5") = (unsigned long) length;
-	register unsigned long reg6 asm("6") = 0UL;
-	register unsigned long reg7 asm("7") = 0UL;
+	unsigned long reg0 = resgr0 && *resgr0 ? *resgr0 : qid | 0x80000000UL;
+	struct ap_queue_status reg1;
+	unsigned long reg2;
+	union register_pair rp1, rp2;
+
+	rp1.even = 0UL;
+	rp1.odd  = 0UL;
+	rp2.even = (unsigned long)msg;
+	rp2.odd  = (unsigned long)length;
 
 	asm volatile(
-		"0: .long 0xb2ae0064\n"		/* DQAP */
-		"   brc   6,0b\n"
-		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
-		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
-		: : "cc", "memory");
-	*psmid = (((unsigned long long) reg6) << 32) + reg7;
+		"	lgr	0,%[reg0]\n"		/* qid param into gr0 */
+		"	lghi	2,0\n"			/* 0 into gr2 (res length) */
+		"0:	ltgr	%N[rp2],%N[rp2]\n"	/* check buf len */
+		"	jz	2f\n"			/* go out if buf len is 0 */
+		"1:	.insn	rre,0xb2ae0000,%[rp1],%[rp2]\n"
+		"	brc	6,0b\n"			/* handle partial complete */
+		"2:	lgr	%[reg0],0\n"		/* gr0 (qid + info) into reg0 */
+		"	lgr	%[reg1],1\n"		/* gr1 (status) into reg1 */
+		"	lgr	%[reg2],2\n"		/* gr2 (res length) into reg2 */
+		: [reg0] "+&d" (reg0), [reg1] "=&d" (reg1), [reg2] "=&d" (reg2),
+		  [rp1] "+&d" (rp1.pair), [rp2] "+&d" (rp2.pair)
+		:
+		: "cc", "memory", "0", "1", "2");
+
+	if (reslength)
+		*reslength = reg2;
+	if (reg2 != 0 && rp2.odd == 0) {
+		/*
+		 * Partially complete, status in gr1 is not set.
+		 * Signal the caller that this dqap is only partially received
+		 * with a special status response code 0xFF and *resgr0 updated
+		 */
+		reg1.response_code = 0xFF;
+		if (resgr0)
+			*resgr0 = reg0;
+	} else {
+		*psmid = (((unsigned long long)rp1.even) << 32) + rp1.odd;
+		if (resgr0)
+			*resgr0 = 0;
+	}
+
 	return reg1;
 }
...
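To illustrate the new ap_dqap() contract documented in the comment above,
here is a hedged caller sketch; the function name receive_reply() and the
error policy are assumptions, only the 0xFF response_code/resgr0 convention
comes from the documented interface:

	/* Sketch only: receive one reply, resuming once if it was truncated. */
	static int receive_reply(ap_qid_t qid, void *buf, size_t buflen)
	{
		struct ap_queue_status status;
		unsigned long long psmid;
		unsigned long resgr0 = 0;	/* 0: first call, gr0 derived from qid */
		size_t reslen = 0;

		status = ap_dqap(qid, &psmid, buf, buflen, &reslen, &resgr0);
		if (status.response_code == 0xFF && resgr0) {
			/* Truncated: reslen bytes remain queued in firmware.
			 * A real caller would consume or enlarge buf, then
			 * resume with the returned resgr0 (!= 0) so that gr0
			 * is reused instead of being rebuilt from qid. */
			status = ap_dqap(qid, &psmid, buf, buflen,
					 &reslen, &resgr0);
		}
		return status.response_code ? -EIO : 0;
	}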
@@ -32,39 +32,22 @@ static const u64 cpumf_ctr_ctl[CPUMF_CTR_SET_MAX] = {
 	[CPUMF_CTR_SET_MT_DIAG] = 0x20,
 };
 
-static inline void ctr_set_enable(u64 *state, int ctr_set)
-{
-	*state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
-}
-
-static inline void ctr_set_disable(u64 *state, int ctr_set)
-{
-	*state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
-}
-
-static inline void ctr_set_start(u64 *state, int ctr_set)
-{
-	*state |= cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
-}
-
-static inline void ctr_set_stop(u64 *state, int ctr_set)
-{
-	*state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
-}
-
-static inline void ctr_set_multiple_enable(u64 *state, u64 ctrsets)
+static inline void ctr_set_enable(u64 *state, u64 ctrsets)
 {
 	*state |= ctrsets << CPUMF_LCCTL_ENABLE_SHIFT;
 }
 
-static inline void ctr_set_multiple_disable(u64 *state, u64 ctrsets)
+static inline void ctr_set_disable(u64 *state, u64 ctrsets)
 {
 	*state &= ~(ctrsets << CPUMF_LCCTL_ENABLE_SHIFT);
 }
 
-static inline void ctr_set_multiple_start(u64 *state, u64 ctrsets)
+static inline void ctr_set_start(u64 *state, u64 ctrsets)
 {
 	*state |= ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT;
 }
 
-static inline void ctr_set_multiple_stop(u64 *state, u64 ctrsets)
+static inline void ctr_set_stop(u64 *state, u64 ctrsets)
 {
 	*state &= ~(ctrsets << CPUMF_LCCTL_ACTCTL_SHIFT);
 }
@@ -92,8 +75,15 @@ struct cpu_cf_events {
 	struct cpumf_ctr_info	info;
 	atomic_t		ctr_set[CPUMF_CTR_SET_MAX];
 	atomic64_t		alert;
-	u64			state;
+	u64			state;		/* For perf_event_open SVC */
+	u64			dev_state;	/* For /dev/hwctr */
 	unsigned int		flags;
+	size_t used;			/* Bytes used in data */
+	size_t usedss;			/* Bytes used in start/stop */
+	unsigned char start[PAGE_SIZE];	/* Counter set at event add */
+	unsigned char stop[PAGE_SIZE];	/* Counter set at event delete */
+	unsigned char data[PAGE_SIZE];	/* Counter set at /dev/hwctr */
+	unsigned int sets;		/* # Counter set saved in memory */
 };
 
 DECLARE_PER_CPU(struct cpu_cf_events, cpu_cf_events);
@@ -124,4 +114,6 @@ static inline int stccm_avail(void)
 size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
 			   struct cpumf_ctr_info *info);
+int cfset_online_cpu(unsigned int cpu);
+int cfset_offline_cpu(unsigned int cpu);
 
 #endif /* _ASM_S390_CPU_MCF_H */
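As a usage note on the consolidated ctr_set_*() helpers above: callers now
always pass a bitmask of counter-set control bits, so several sets can be
enabled or started per call. A sketch (the chosen sets are illustrative, and
lcctl() as the load-counter-set-controls helper is assumed from this header):

	u64 state = 0;

	/* Enable and activate two counter sets with one call each. */
	ctr_set_enable(&state, cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
			       cpumf_ctr_ctl[CPUMF_CTR_SET_USER]);
	ctr_set_start(&state, cpumf_ctr_ctl[CPUMF_CTR_SET_BASIC] |
			      cpumf_ctr_ctl[CPUMF_CTR_SET_USER]);
	lcctl(state);	/* load the combined counter-set controls */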
@@ -21,8 +21,6 @@
 #define CR0_INTERRUPT_KEY_SUBMASK	BIT(63 - 57)
 #define CR0_MEASUREMENT_ALERT_SUBMASK	BIT(63 - 58)
 
-#define CR2_GUARDED_STORAGE		BIT(63 - 59)
-
 #define CR14_UNUSED_32			BIT(63 - 32)
 #define CR14_UNUSED_33			BIT(63 - 33)
 #define CR14_CHANNEL_REPORT_SUBMASK	BIT(63 - 35)
...
@@ -144,10 +144,6 @@ typedef s390_compat_regs compat_elf_gregset_t;
 #include <linux/sched/mm.h>	/* for task_struct */
 #include <asm/mmu_context.h>
 
-#include <asm/vdso.h>
-
-extern unsigned int vdso_enabled;
-
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
@@ -176,7 +172,7 @@ struct arch_elf_state {
 	    !current->mm->context.alloc_pgste) {		\
 		set_thread_flag(TIF_PGSTE);			\
 		set_pt_regs_flag(task_pt_regs(current),		\
-				 PIF_SYSCALL_RESTART);		\
+				 PIF_EXECVE_PGSTE_RESTART);	\
 		_state->rc = -EAGAIN;				\
 	}							\
 	_state->rc;						\
@@ -270,7 +266,6 @@ do {								\
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO							\
 do {								\
-	if (vdso_enabled)						\
 	NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
 		    (unsigned long)current->mm->context.vdso_base);	\
 } while (0)
...
@@ -14,7 +14,6 @@
 #define ARCH_EXIT_TO_USER_MODE_WORK (_TIF_GUARDED_STORAGE | _TIF_PER_TRAP)
 
 void do_per_trap(struct pt_regs *regs);
-void do_syscall(struct pt_regs *regs);
 
 #ifdef CONFIG_DEBUG_ENTRY
 static __always_inline void arch_check_user_regs(struct pt_regs *regs)
...
@@ -5,7 +5,7 @@
 #include <asm/asm-const.h>
 #include <linux/stringify.h>
 
-#define __ALIGN .align 4, 0x07
+#define __ALIGN .align 16, 0x07
 #define __ALIGN_STR __stringify(__ALIGN)
 
 /*
...
@@ -23,12 +23,16 @@
 #define MCCK_CODE_SYSTEM_DAMAGE		BIT(63)
 #define MCCK_CODE_EXT_DAMAGE		BIT(63 - 5)
 #define MCCK_CODE_CP			BIT(63 - 9)
-#define MCCK_CODE_CPU_TIMER_VALID	BIT(63 - 46)
+#define MCCK_CODE_STG_ERROR		BIT(63 - 16)
+#define MCCK_CODE_STG_KEY_ERROR		BIT(63 - 18)
+#define MCCK_CODE_STG_DEGRAD		BIT(63 - 19)
 #define MCCK_CODE_PSW_MWP_VALID		BIT(63 - 20)
 #define MCCK_CODE_PSW_IA_VALID		BIT(63 - 23)
+#define MCCK_CODE_STG_FAIL_ADDR		BIT(63 - 24)
 #define MCCK_CODE_CR_VALID		BIT(63 - 29)
 #define MCCK_CODE_GS_VALID		BIT(63 - 36)
 #define MCCK_CODE_FC_VALID		BIT(63 - 43)
+#define MCCK_CODE_CPU_TIMER_VALID	BIT(63 - 46)
 
 #ifndef __ASSEMBLY__
...
@@ -29,12 +29,6 @@ static inline void preempt_count_set(int pc)
 				  old, new) != old);
 }
 
-#define init_task_preempt_count(p)	do { } while (0)
-
-#define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
 static inline void set_preempt_need_resched(void)
 {
 	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
@@ -88,12 +82,6 @@ static inline void preempt_count_set(int pc)
 	S390_lowcore.preempt_count = pc;
 }
 
-#define init_task_preempt_count(p)	do { } while (0)
-
-#define init_idle_preempt_count(p, cpu)	do { \
-	S390_lowcore.preempt_count = PREEMPT_DISABLED; \
-} while (0)
-
 static inline void set_preempt_need_resched(void)
 {
 }
@@ -130,6 +118,10 @@ static inline bool should_resched(int preempt_offset)
 
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
+#define init_task_preempt_count(p)	do { } while (0)
+/* Deferred to CPU bringup time */
+#define init_idle_preempt_count(p, cpu)	do { } while (0)
+
 #ifdef CONFIG_PREEMPTION
 extern void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
...
@@ -12,12 +12,12 @@
 #include <asm/tpi.h>
 
 #define PIF_SYSCALL			0	/* inside a system call */
-#define PIF_SYSCALL_RESTART		1	/* restart the current system call */
+#define PIF_EXECVE_PGSTE_RESTART	1	/* restart execve for PGSTE binaries */
 #define PIF_SYSCALL_RET_SET		2	/* return value was set via ptrace */
 #define PIF_GUEST_FAULT			3	/* indicates program check in sie64a */
 
 #define _PIF_SYSCALL			BIT(PIF_SYSCALL)
-#define _PIF_SYSCALL_RESTART		BIT(PIF_SYSCALL_RESTART)
+#define _PIF_EXECVE_PGSTE_RESTART	BIT(PIF_EXECVE_PGSTE_RESTART)
 #define _PIF_SYSCALL_RET_SET		BIT(PIF_SYSCALL_RET_SET)
 #define _PIF_GUEST_FAULT		BIT(PIF_GUEST_FAULT)
@@ -162,6 +162,14 @@ static inline int test_pt_regs_flag(struct pt_regs *regs, int flag)
 	return !!(regs->flags & (1UL << flag));
 }
 
+static inline int test_and_clear_pt_regs_flag(struct pt_regs *regs, int flag)
+{
+	int ret = test_pt_regs_flag(regs, flag);
+
+	clear_pt_regs_flag(regs, flag);
+	return ret;
+}
+
 /*
  * These are defined as per linux/ptrace.h, which see.
  */
...
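The new test_and_clear_pt_regs_flag() helper above lets the exit-to-user
path consume a one-shot flag in a single step; a minimal sketch, where
handle_execve_restart() is a hypothetical consumer:

	/* Sketch: consume the restart flag once on the way back to user space. */
	if (test_and_clear_pt_regs_flag(regs, PIF_EXECVE_PGSTE_RESTART))
		handle_execve_restart(regs);	/* hypothetical helper */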
@@ -159,6 +159,8 @@ static inline unsigned long kaslr_offset(void)
 	return __kaslr_offset;
 }
 
+extern int is_full_image;
+
 static inline u32 gen_lpswe(unsigned long addr)
 {
 	BUILD_BUG_ON(addr > 0xfff);
...
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef __ASM_S390_SOFTIRQ_STACK_H
#define __ASM_S390_SOFTIRQ_STACK_H
#include <asm/lowcore.h>
#include <asm/stacktrace.h>
static inline void do_softirq_own_stack(void)
{
call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
}
#endif /* __ASM_S390_SOFTIRQ_STACK_H */
@@ -74,23 +74,6 @@ struct stack_frame {
 	((unsigned long)__builtin_frame_address(0) -			\
 	 offsetof(struct stack_frame, back_chain))
 
-#define CALL_ARGS_0()							\
-	register unsigned long r2 asm("2")
-#define CALL_ARGS_1(arg1)						\
-	register unsigned long r2 asm("2") = (unsigned long)(arg1)
-#define CALL_ARGS_2(arg1, arg2)						\
-	CALL_ARGS_1(arg1);						\
-	register unsigned long r3 asm("3") = (unsigned long)(arg2)
-#define CALL_ARGS_3(arg1, arg2, arg3)					\
-	CALL_ARGS_2(arg1, arg2);					\
-	register unsigned long r4 asm("4") = (unsigned long)(arg3)
-#define CALL_ARGS_4(arg1, arg2, arg3, arg4)				\
-	CALL_ARGS_3(arg1, arg2, arg3);					\
-	register unsigned long r4 asm("5") = (unsigned long)(arg4)
-#define CALL_ARGS_5(arg1, arg2, arg3, arg4, arg5)			\
-	CALL_ARGS_4(arg1, arg2, arg3, arg4);				\
-	register unsigned long r4 asm("6") = (unsigned long)(arg5)
-
 /*
  * To keep this simple mark register 2-6 as being changed (volatile)
  * by the called function, even though register 6 is saved/nonvolatile.
@@ -109,34 +92,113 @@ struct stack_frame {
 #define CALL_CLOBBER_1 CALL_CLOBBER_2, "3"
 #define CALL_CLOBBER_0 CALL_CLOBBER_1
 
-#define CALL_ON_STACK(fn, stack, nr, args...)				\
+#define CALL_LARGS_0(...)						\
+	long dummy = 0
+#define CALL_LARGS_1(t1, a1)						\
+	long arg1 = (long)(t1)(a1)
+#define CALL_LARGS_2(t1, a1, t2, a2)					\
+	CALL_LARGS_1(t1, a1);						\
+	long arg2 = (long)(t2)(a2)
+#define CALL_LARGS_3(t1, a1, t2, a2, t3, a3)				\
+	CALL_LARGS_2(t1, a1, t2, a2);					\
+	long arg3 = (long)(t3)(a3)
+#define CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4)			\
+	CALL_LARGS_3(t1, a1, t2, a2, t3, a3);				\
+	long arg4 = (long)(t4)(a4)
+#define CALL_LARGS_5(t1, a1, t2, a2, t3, a3, t4, a4, t5, a5)		\
+	CALL_LARGS_4(t1, a1, t2, a2, t3, a3, t4, a4);			\
+	long arg5 = (long)(t5)(a5)
+
+#define CALL_REGS_0							\
+	register long r2 asm("2") = dummy
+#define CALL_REGS_1							\
+	register long r2 asm("2") = arg1
+#define CALL_REGS_2							\
+	CALL_REGS_1;							\
+	register long r3 asm("3") = arg2
+#define CALL_REGS_3							\
+	CALL_REGS_2;							\
+	register long r4 asm("4") = arg3
+#define CALL_REGS_4							\
+	CALL_REGS_3;							\
+	register long r5 asm("5") = arg4
+#define CALL_REGS_5							\
+	CALL_REGS_4;							\
+	register long r6 asm("6") = arg5
+
+#define CALL_TYPECHECK_0(...)
+#define CALL_TYPECHECK_1(t, a, ...)					\
+	typecheck(t, a)
+#define CALL_TYPECHECK_2(t, a, ...)					\
+	CALL_TYPECHECK_1(__VA_ARGS__);					\
+	typecheck(t, a)
+#define CALL_TYPECHECK_3(t, a, ...)					\
+	CALL_TYPECHECK_2(__VA_ARGS__);					\
+	typecheck(t, a)
+#define CALL_TYPECHECK_4(t, a, ...)					\
+	CALL_TYPECHECK_3(__VA_ARGS__);					\
+	typecheck(t, a)
+#define CALL_TYPECHECK_5(t, a, ...)					\
+	CALL_TYPECHECK_4(__VA_ARGS__);					\
+	typecheck(t, a)
+
+#define CALL_PARM_0(...) void
+#define CALL_PARM_1(t, a, ...) t
+#define CALL_PARM_2(t, a, ...) t, CALL_PARM_1(__VA_ARGS__)
+#define CALL_PARM_3(t, a, ...) t, CALL_PARM_2(__VA_ARGS__)
+#define CALL_PARM_4(t, a, ...) t, CALL_PARM_3(__VA_ARGS__)
+#define CALL_PARM_5(t, a, ...) t, CALL_PARM_4(__VA_ARGS__)
+#define CALL_PARM_6(t, a, ...) t, CALL_PARM_5(__VA_ARGS__)
+
+/*
+ * Use call_on_stack() to call a function switching to a specified
+ * stack. Proper sign and zero extension of function arguments is
+ * done. Usage:
+ *
+ * rc = call_on_stack(nr, stack, rettype, fn, t1, a1, t2, a2, ...)
+ *
+ * - nr specifies the number of function arguments of fn.
+ * - stack specifies the stack to be used.
+ * - fn is the function to be called.
+ * - rettype is the return type of fn.
+ * - t1, a1, ... are pairs, where t1 must match the type of the first
+ *   argument of fn, t2 the second, etc. a1 is the corresponding
+ *   first function argument (not name), etc.
+ */
+#define call_on_stack(nr, stack, rettype, fn, ...)			\
 ({									\
+	rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = fn;		\
 	unsigned long frame = current_frame_address();			\
-	CALL_ARGS_##nr(args);						\
+	unsigned long __stack = stack;					\
 	unsigned long prev;						\
+	CALL_LARGS_##nr(__VA_ARGS__);					\
+	CALL_REGS_##nr;							\
 									\
+	CALL_TYPECHECK_##nr(__VA_ARGS__);				\
 	asm volatile(							\
-		"	la	%[_prev],0(15)\n"			\
+		"	lgr	%[_prev],15\n"				\
 		"	lg	15,%[_stack]\n"				\
 		"	stg	%[_frame],%[_bc](15)\n"			\
 		"	brasl	14,%[_fn]\n"				\
-		"	la	15,0(%[_prev])\n"			\
-		: [_prev] "=&a" (prev), CALL_FMT_##nr			\
-		: [_stack] "R" (stack),					\
+		"	lgr	15,%[_prev]\n"				\
+		: [_prev] "=&d" (prev), CALL_FMT_##nr			\
+		: [_stack] "R" (__stack),				\
 		  [_bc] "i" (offsetof(struct stack_frame, back_chain)),	\
 		  [_frame] "d" (frame),					\
-		  [_fn] "X" (fn) : CALL_CLOBBER_##nr);			\
-	r2;								\
+		  [_fn] "X" (__fn) : CALL_CLOBBER_##nr);		\
+	(rettype)r2;							\
 })
 
-#define CALL_ON_STACK_NORETURN(fn, stack)				\
+#define call_on_stack_noreturn(fn, stack)				\
 ({									\
+	void (*__fn)(void) = fn;					\
+									\
 	asm volatile(							\
 		"	la	15,0(%[_stack])\n"			\
 		"	xc	%[_bc](8,15),%[_bc](15)\n"		\
 		"	brasl	14,%[_fn]\n"				\
 		::[_bc] "i" (offsetof(struct stack_frame, back_chain)),	\
-		  [_stack] "a" (stack), [_fn] "X" (fn));		\
+		  [_stack] "a" (stack), [_fn] "X" (__fn));		\
 	BUG();								\
 })
...
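A usage sketch for the typed call_on_stack() macro above, following its own
usage comment; do_foo() and its arguments are hypothetical:

	/* Hypothetical worker that must run on the async stack. */
	static int do_foo(unsigned long addr, int flags);

	static int run_foo(unsigned long addr, int flags)
	{
		/* nr = 2 arguments; each type/value pair must match do_foo(). */
		return call_on_stack(2, S390_lowcore.async_stack, int, do_foo,
				     unsigned long, addr, int, flags);
	}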
@@ -73,6 +73,10 @@ enum uv_cmds_inst {
 	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
 };
 
+enum uv_feat_ind {
+	BIT_UV_FEAT_MISC = 0,
+};
+
 struct uv_cb_header {
 	u16 len;
 	u16 cmd;	/* Command Code */
@@ -97,7 +101,8 @@ struct uv_cb_qui {
 	u64 max_guest_stor_addr;
 	u8  reserved88[158 - 136];
 	u16 max_guest_cpu_id;
-	u8  reserveda0[200 - 160];
+	u64 uv_feature_indications;
+	u8  reserveda0[200 - 168];
 } __packed __aligned(8);
 
 /* Initialize Ultravisor */
@@ -274,6 +279,7 @@ struct uv_info {
 	unsigned long max_sec_stor_addr;
 	unsigned int max_num_sec_conf;
 	unsigned short max_guest_cpu_id;
+	unsigned long uv_feature_indications;
 };
 
 extern struct uv_info uv_info;
...
@@ -4,18 +4,31 @@
 
 #include <vdso/datapage.h>
 
-/* Default link address for the vDSO */
-#define VDSO64_LBASE	0
+#ifndef __ASSEMBLY__
 
-#define __VVAR_PAGES	2
+#include <generated/vdso64-offsets.h>
+#ifdef CONFIG_COMPAT
+#include <generated/vdso32-offsets.h>
+#endif
 
-#define VDSO_VERSION_STRING	LINUX_2.6.29
+#define VDSO64_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso64_offset_##name))
+#ifdef CONFIG_COMPAT
+#define VDSO32_SYMBOL(tsk, name) ((tsk)->mm->context.vdso_base + (vdso32_offset_##name))
+#else
+#define VDSO32_SYMBOL(tsk, name) (-1UL)
+#endif
 
-#ifndef __ASSEMBLY__
 extern struct vdso_data *vdso_data;
 
 int vdso_getcpu_init(void);
 
 #endif /* __ASSEMBLY__ */
+
+/* Default link address for the vDSO */
+#define VDSO_LBASE	0
+
+#define __VVAR_PAGES	2
+
+#define VDSO_VERSION_STRING	LINUX_2.6.29
+
 #endif /* __S390_VDSO_H__ */
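With the generated offset headers in place, VDSO64_SYMBOL()/VDSO32_SYMBOL()
resolve per-task vdso entry points; a sketch of the signal-trampoline use
mentioned in the merge message (the sigreturn symbol name and the register
convention are assumptions for illustration):

	/* Sketch: point the signal return address at the vdso trampoline. */
	unsigned long restorer;

	if (is_compat_task())
		restorer = VDSO32_SYMBOL(current, sigreturn);	/* assumed symbol */
	else
		restorer = VDSO64_SYMBOL(current, sigreturn);
	regs->gprs[14] = restorer;	/* r14: return address after the handler */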
@@ -8,7 +8,6 @@
 
 #include <asm/timex.h>
 #include <asm/unistd.h>
-#include <asm/vdso.h>
 #include <linux/compiler.h>
 
 #define vdso_calc_delta __arch_vdso_calc_delta
...
@@ -71,10 +71,10 @@ obj-$(CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT) += ima_arch.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_event.o perf_cpum_cf_common.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_cpum_cf.o perf_cpum_sf.o
 obj-$(CONFIG_PERF_EVENTS)	+= perf_cpum_cf_events.o perf_regs.o
-obj-$(CONFIG_PERF_EVENTS)	+= perf_cpum_cf_diag.o
 obj-$(CONFIG_TRACEPOINTS)	+= trace.o
 obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE))	+= uv.o
 
 # vdso
 obj-y				+= vdso64/
+obj-$(CONFIG_COMPAT)		+= vdso32/