Merge branch kvm-arm64/misc-7.1 into kvmarm-master/next

* kvm-arm64/misc-7.1:
  KVM: arm64: selftests: Avoid testing the IMPDEF behavior
  KVM: arm64: Destroy stage-2 page-table in kvm_arch_destroy_vm()
  KVM: arm64: Don't leave mmu->pgt dangling on kvm_init_stage2_mmu() error
  KVM: arm64: Prevent the host from using an smc with imm16 != 0

Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
Marc Zyngier 2026-04-08 12:26:11 +01:00
commit 94b4ae79eb
4 changed files with 11 additions and 12 deletions

View File

@@ -314,6 +314,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
if (is_protected_kvm_enabled())
pkvm_destroy_hyp_vm(kvm);
kvm_uninit_stage2_mmu(kvm);
kvm_destroy_mpidr_data(kvm);
kfree(kvm->arch.sysreg_masks);

View File

@@ -796,8 +796,14 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(u64, func_id, host_ctxt, 0);
u64 esr = read_sysreg_el2(SYS_ESR);
bool handled;
if (esr & ESR_ELx_xVC_IMM_MASK) {
cpu_reg(host_ctxt, 0) = SMCCC_RET_NOT_SUPPORTED;
goto exit_skip_instr;
}
func_id &= ~ARM_SMCCC_CALL_HINTS;
handled = kvm_host_psci_handler(host_ctxt, func_id);
@@ -806,6 +812,7 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
if (!handled)
default_host_smc_handler(host_ctxt);
exit_skip_instr:
/* SMC was trapped, move ELR past the current PC. */
kvm_skip_host_instr();
}

View File

@@ -1013,6 +1013,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t
out_destroy_pgtable:
kvm_stage2_destroy(pgt);
mmu->pgt = NULL;
out_free_pgtable:
kfree(pgt);
return err;

View File

@@ -13,7 +13,6 @@
enum {
CLEAR_ACCESS_FLAG,
TEST_ACCESS_FLAG,
};
static u64 *ptep_hva;
@@ -49,7 +48,6 @@ do { \
GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_ATTR, par), MAIR_ATTR_NORMAL); \
GUEST_ASSERT_EQ(FIELD_GET(SYS_PAR_EL1_SH, par), PTE_SHARED >> 8); \
GUEST_ASSERT_EQ(par & SYS_PAR_EL1_PA, TEST_ADDR); \
GUEST_SYNC(TEST_ACCESS_FLAG); \
} \
} while (0)
@@ -85,10 +83,6 @@ static void guest_code(void)
if (!SYS_FIELD_GET(ID_AA64MMFR1_EL1, HAFDBS, read_sysreg(id_aa64mmfr1_el1)))
GUEST_DONE();
/*
* KVM's software PTW makes the implementation choice that the AT
* instruction sets the access flag.
*/
sysreg_clear_set(tcr_el1, 0, TCR_HA);
isb();
test_at(false);
@@ -102,8 +96,8 @@ static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
case CLEAR_ACCESS_FLAG:
/*
* Delete + reinstall the memslot to invalidate stage-2
* mappings of the stage-1 page tables, forcing KVM to
* use the 'slow' AT emulation path.
* mappings of the stage-1 page tables, allowing KVM to
* potentially use the 'slow' AT emulation path.
*
* This and clearing the access flag from host userspace
* ensures that the access flag cannot be set speculatively
@@ -112,10 +106,6 @@ static void handle_sync(struct kvm_vcpu *vcpu, struct ucall *uc)
clear_bit(__ffs(PTE_AF), ptep_hva);
vm_mem_region_reload(vcpu->vm, vcpu->vm->memslots[MEM_REGION_PT]);
break;
case TEST_ACCESS_FLAG:
TEST_ASSERT(test_bit(__ffs(PTE_AF), ptep_hva),
"Expected access flag to be set (desc: %lu)", *ptep_hva);
break;
default:
TEST_FAIL("Unexpected SYNC arg: %lu", uc->args[1]);
}