mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
Assorted arm64, ACPI and kselftest fixes for 7.1-rc2:
- Avoid writing an uninitialised stack variable to POR_EL0 on
sigreturn if the poe_context record is absent
- Reserve one more page for the early 4K-page kernel mapping to cover
the extra [_text, _stext) split introduced by the non-executable
read-only mapping
- Force the arch_local_irq_*() wrappers to be __always_inline so that
noinstr entry and idle paths cannot call out-of-line, instrumentable
copies
- Fix potential sign extension in the arm64 SCS unwinder's DWARF
advance_loc4 decoding
- Tolerate arm64 ACPI platforms with only WFI and no deeper PSCI idle
states, restoring cpuidle registration on such systems
- Include the UAPI <asm/ptrace.h> header in the arm64 GCS libc test
rather than carrying a duplicate struct user_gcs definition (the
original #ifdef NT_ARM_GCS wrongly guarded the structure definition
as well, so the struct would be omitted whenever the toolchain
already defined NT_ARM_GCS)
-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEE5RElWfyWxS+3PLO2a9axLQDIXvEFAmn03FoACgkQa9axLQDI
XvELcxAAmhoarEo1Te6wWyybco9LqvfZPzirij+YYLw0GWuqnN99N+f79FZirTbz
ug9AZiG1PPQY0hCurNWwEjQfWJ6dJYo/4mIT9R1rbeU2MwcxHawIePrM0T8PMBF8
nHMZaEy/EZ8hX3pam98d78F38yFUvxaikghhxQvHLFlQA4nU19IElQCyMogofe05
RTE71nDdMZAnfoOS6cVk7wnH99VLfbqiyl97zUOjnyFNdye99UDovayXPUdUkgbN
clF2qxWInS8TPuoKQPz5hzYkbuR0doFwIasLjSMnOQx+FMZdMmPXEZbwqI/hYl7l
xc5bjKtJH/AQqdoEkZW9MUJ1GhzMttTpoYW9//wgRpJtBDNxisdOE9LpcsCMMNIM
wKLrLVLTXsv5jyPeEFMRtUjd0tJ7bV0f3cO/sv5EVBd238CGT76zwCgjpMtZQqbj
KWsTJpM5oYAsKkBHAYE6XCa5h7kre0/249zH/CYhI/mXJkaHJRM8Ub2CnqBgqeTG
KobtDIUJt+TPAhThj/2OQ/HxP6SLzgBgsgVmVqE1nhkOPlcfg3YYBsgpgN+bzMfG
Z7h14yyCAhunoGRVBMtyUgksAvflR+PIS06soRjLZ5cXcOp/3h+sXs6/XVXHtOr/
UCeO5mfaNUNAr3xJ9oYhuAAT74b7zKXY3YM4NRVASfq6rQS0nWg=
=a9er
-----END PGP SIGNATURE-----
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Catalin Marinas:
- Avoid writing an uninitialised stack variable to POR_EL0 on sigreturn
if the poe_context record is absent
- Reserve one more page for the early 4K-page kernel mapping to cover
the extra [_text, _stext) split introduced by the non-executable
read-only mapping
- Force the arch_local_irq_*() wrappers to be __always_inline so that
noinstr entry and idle paths cannot call out-of-line, instrumentable
copies
- Fix potential sign extension in the arm64 SCS unwinder's DWARF
advance_loc4 decoding
- Tolerate arm64 ACPI platforms with only WFI and no deeper PSCI idle
states, restoring cpuidle registration on such systems
- Include the UAPI <asm/ptrace.h> header in the arm64 GCS libc test
rather than carrying a duplicate struct user_gcs definition (the
original #ifdef NT_ARM_GCS wrongly guarded the structure definition
as well, so the struct would be omitted whenever the toolchain
already defined NT_ARM_GCS)
* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
arm64: signal: Preserve POR_EL0 if poe_context is missing
arm64: Reserve an extra page for early kernel mapping
kselftest/arm64: Include <asm/ptrace.h> for user_gcs definition
ACPI: arm64: cpuidle: Tolerate platforms with no deep PSCI idle states
arm64/irqflags: __always_inline the arch_local_irq_*() helpers
arm64/scs: Fix potential sign extension issue of advance_loc4
This commit is contained in:
commit
cd546f7ae2
|
|
@@ -40,7 +40,7 @@ static __always_inline void __pmr_local_irq_enable(void)
 	barrier();
 }
 
-static inline void arch_local_irq_enable(void)
+static __always_inline void arch_local_irq_enable(void)
 {
 	if (system_uses_irq_prio_masking()) {
 		__pmr_local_irq_enable();
@@ -68,7 +68,7 @@ static __always_inline void __pmr_local_irq_disable(void)
 	barrier();
 }
 
-static inline void arch_local_irq_disable(void)
+static __always_inline void arch_local_irq_disable(void)
 {
 	if (system_uses_irq_prio_masking()) {
 		__pmr_local_irq_disable();
@@ -90,7 +90,7 @@ static __always_inline unsigned long __pmr_local_save_flags(void)
 /*
  * Save the current interrupt enable state.
  */
-static inline unsigned long arch_local_save_flags(void)
+static __always_inline unsigned long arch_local_save_flags(void)
 {
 	if (system_uses_irq_prio_masking()) {
 		return __pmr_local_save_flags();
@@ -109,7 +109,7 @@ static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
 	return flags != GIC_PRIO_IRQON;
 }
 
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static __always_inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
 	if (system_uses_irq_prio_masking()) {
 		return __pmr_irqs_disabled_flags(flags);
@@ -128,7 +128,7 @@ static __always_inline bool __pmr_irqs_disabled(void)
 	return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
 }
 
-static inline bool arch_irqs_disabled(void)
+static __always_inline bool arch_irqs_disabled(void)
 {
 	if (system_uses_irq_prio_masking()) {
 		return __pmr_irqs_disabled();
@@ -160,7 +160,7 @@ static __always_inline unsigned long __pmr_local_irq_save(void)
 	return flags;
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long arch_local_irq_save(void)
 {
 	if (system_uses_irq_prio_masking()) {
 		return __pmr_local_irq_save();
@@ -187,7 +187,7 @@ static __always_inline void __pmr_local_irq_restore(unsigned long flags)
 /*
  * restore saved IRQ state
  */
-static inline void arch_local_irq_restore(unsigned long flags)
+static __always_inline void arch_local_irq_restore(unsigned long flags)
 {
 	if (system_uses_irq_prio_masking()) {
 		__pmr_local_irq_restore(flags);
@@ -68,7 +68,12 @@
 #define KERNEL_SEGMENT_COUNT 5
 
 #if SWAPPER_BLOCK_SIZE > SEGMENT_ALIGN
-#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 1)
+/*
+ * KERNEL_SEGMENT_COUNT counts the permanent kernel VMAs. The early mapping
+ * has one additional split, [_text, _stext). Reserve one more page for the
+ * SWAPPER_BLOCK_SIZE-unaligned boundaries.
+ */
+#define EARLY_SEGMENT_EXTRA_PAGES (KERNEL_SEGMENT_COUNT + 2)
 /*
  * The initial ID map consists of the kernel image, mapped as two separate
  * segments, and may appear misaligned wrt the swapper block size. This means
@@ -196,9 +196,9 @@ static int scs_handle_fde_frame(const struct eh_frame *frame,
 			loc += *opcode++ * code_alignment_factor;
 			loc += (*opcode++ << 8) * code_alignment_factor;
 			loc += (*opcode++ << 16) * code_alignment_factor;
-			loc += (*opcode++ << 24) * code_alignment_factor;
+			loc += ((u64)*opcode++ << 24) * code_alignment_factor;
 			size -= 4;
 			break;
 
 		case DW_CFA_def_cfa:
 		case DW_CFA_offset_extended:
@@ -67,6 +67,9 @@ struct rt_sigframe_user_layout {
 	unsigned long end_offset;
 };
 
+#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
+#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
+
 /*
  * Holds any EL0-controlled state that influences unprivileged memory accesses.
  * This includes both accesses done in userspace and uaccess done in the kernel.
@@ -74,13 +77,35 @@ struct rt_sigframe_user_layout {
  * This state needs to be carefully managed to ensure that it doesn't cause
  * uaccess to fail when setting up the signal frame, and the signal handler
  * itself also expects a well-defined state when entered.
+ *
+ * The struct should be zero-initialised. Its members should only be accessed
+ * via the accessors below. __valid_fields tracks which of the fields are valid
+ * (have been set to some value).
  */
 struct user_access_state {
-	u64 por_el0;
+	unsigned int __valid_fields;
+	u64 __por_el0;
 };
 
-#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
-#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
+#define UA_STATE_HAS_POR_EL0 BIT(0)
+
+static void set_ua_state_por_el0(struct user_access_state *ua_state,
+				 u64 por_el0)
+{
+	ua_state->__por_el0 = por_el0;
+	ua_state->__valid_fields |= UA_STATE_HAS_POR_EL0;
+}
+
+static int get_ua_state_por_el0(const struct user_access_state *ua_state,
+				u64 *por_el0)
+{
+	if (ua_state->__valid_fields & UA_STATE_HAS_POR_EL0) {
+		*por_el0 = ua_state->__por_el0;
+		return 0;
+	}
+
+	return -ENOENT;
+}
 
 /*
  * Save the user access state into ua_state and reset it to disable any
@@ -94,7 +119,7 @@ static void save_reset_user_access_state(struct user_access_state *ua_state)
 	for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
 		por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX);
 
-	ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
+	set_ua_state_por_el0(ua_state, read_sysreg_s(SYS_POR_EL0));
 	write_sysreg_s(por_enable_all, SYS_POR_EL0);
 	/*
 	 * No ISB required as we can tolerate spurious Overlay faults -
@@ -122,8 +147,10 @@ static void set_handler_user_access_state(void)
 */
 static void restore_user_access_state(const struct user_access_state *ua_state)
 {
-	if (system_supports_poe())
-		write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
+	u64 por_el0;
+
+	if (get_ua_state_por_el0(ua_state, &por_el0) == 0)
+		write_sysreg_s(por_el0, SYS_POR_EL0);
 }
 
 static void init_user_layout(struct rt_sigframe_user_layout *user)
@@ -333,11 +360,16 @@ static int restore_fpmr_context(struct user_ctxs *user)
 static int preserve_poe_context(struct poe_context __user *ctx,
 				const struct user_access_state *ua_state)
 {
-	int err = 0;
+	int err;
+	u64 por_el0;
+
+	err = get_ua_state_por_el0(ua_state, &por_el0);
+	if (WARN_ON_ONCE(err))
+		return err;
 
 	__put_user_error(POE_MAGIC, &ctx->head.magic, err);
 	__put_user_error(sizeof(*ctx), &ctx->head.size, err);
-	__put_user_error(ua_state->por_el0, &ctx->por_el0, err);
+	__put_user_error(por_el0, &ctx->por_el0, err);
 
 	return err;
 }
@@ -353,7 +385,7 @@ static int restore_poe_context(struct user_ctxs *user,
 
 	__get_user_error(por_el0, &(user->poe->por_el0), err);
 	if (!err)
-		ua_state->por_el0 = por_el0;
+		set_ua_state_por_el0(ua_state, por_el0);
 
 	return err;
 }
@@ -1095,7 +1127,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
 {
 	struct pt_regs *regs = current_pt_regs();
 	struct rt_sigframe __user *frame;
-	struct user_access_state ua_state;
+	struct user_access_state ua_state = {};
 
 	/* Always make any pending restarted system calls return -EINTR */
 	current->restart_block.fn = do_no_restart_syscall;
@@ -1507,7 +1539,7 @@ static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
 {
 	struct rt_sigframe_user_layout user;
 	struct rt_sigframe __user *frame;
-	struct user_access_state ua_state;
+	struct user_access_state ua_state = {};
 	int err = 0;
 
 	fpsimd_save_and_flush_current_state();
@@ -16,7 +16,7 @@
 
 static int psci_acpi_cpu_init_idle(unsigned int cpu)
 {
-	int i, count;
+	int i;
 	struct acpi_lpi_state *lpi;
 	struct acpi_processor *pr = per_cpu(processors, cpu);
 
@@ -30,14 +30,10 @@ static int psci_acpi_cpu_init_idle(unsigned int cpu)
 	if (!psci_ops.cpu_suspend)
 		return -EOPNOTSUPP;
 
-	count = pr->power.count - 1;
-	if (count <= 0)
-		return -ENODEV;
-
-	for (i = 0; i < count; i++) {
+	for (i = 1; i < pr->power.count; i++) {
 		u32 state;
 
-		lpi = &pr->power.lpi_states[i + 1];
+		lpi = &pr->power.lpi_states[i];
 		/*
 		 * Only bits[31:0] represent a PSCI power_state while
 		 * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification
@@ -18,12 +18,6 @@
 
 #ifndef NT_ARM_GCS
 #define NT_ARM_GCS 0x410
-
-struct user_gcs {
-	__u64 features_enabled;
-	__u64 features_locked;
-	__u64 gcspr_el0;
-};
 #endif
 
 /* Shadow Stack/Guarded Control Stack interface */
@@ -16,6 +16,7 @@
 
 #include <asm/hwcap.h>
 #include <asm/mman.h>
+#include <asm/ptrace.h>
 
 #include <linux/compiler.h>
 
Loading…
Reference in New Issue
Block a user