mirror of
https://github.com/torvalds/linux.git
synced 2026-05-12 16:18:45 +02:00
KVM: arm64: Implement the MEM_SHARE hypercall for protected VMs
Implement the ARM_SMCCC_KVM_FUNC_MEM_SHARE hypercall to allow protected VMs to share memory (e.g. the swiotlb bounce buffers) back to the host. Reviewed-by: Vincent Donnefort <vdonnefort@google.com> Tested-by: Fuad Tabba <tabba@google.com> Tested-by: Mostafa Saleh <smostafa@google.com> Signed-off-by: Will Deacon <will@kernel.org> Link: https://patch.msgid.link/20260330144841.26181-30-will@kernel.org Signed-off-by: Marc Zyngier <maz@kernel.org>
This commit is contained in:
parent
94c5250515
commit
03313efed5
|
|
@ -34,6 +34,7 @@ extern unsigned long hyp_nr_cpus;
|
|||
|
||||
int __pkvm_prot_finalize(void);
|
||||
int __pkvm_host_share_hyp(u64 pfn);
|
||||
int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn);
|
||||
int __pkvm_host_unshare_hyp(u64 pfn);
|
||||
int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages);
|
||||
int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages);
|
||||
|
|
|
|||
|
|
@ -959,6 +959,38 @@ int __pkvm_host_share_hyp(u64 pfn)
|
|||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Share a page of a protected guest back with the host.
 *
 * The page identified by @gfn must currently be mapped in the guest's
 * stage-2 page table and exclusively owned by the guest
 * (PKVM_PAGE_OWNED), and must be absent from the host's stage-2
 * (PKVM_NOPAGE). On success, the guest mapping is rewritten as
 * SHARED_OWNED and the host's view of the page becomes SHARED_BORROWED.
 *
 * Returns 0 on success, a negative error code from the guest PTE walk,
 * or -EPERM if either ownership precondition does not hold.
 */
int __pkvm_guest_share_host(struct pkvm_hyp_vcpu *vcpu, u64 gfn)
{
	struct pkvm_hyp_vm *vm = pkvm_hyp_vcpu_to_hyp_vm(vcpu);
	u64 phys, ipa = hyp_pfn_to_phys(gfn);
	kvm_pte_t pte;
	int ret;

	/* Lock ordering: host component first, then the guest's. */
	host_lock_component();
	guest_lock_component(vm);

	/* Resolve the IPA to a valid guest PTE and its physical address. */
	ret = get_valid_guest_pte(vm, ipa, &pte, &phys);
	if (ret)
		goto unlock;

	ret = -EPERM;
	/* The guest must own the page outright before it can share it. */
	if (pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte)) != PKVM_PAGE_OWNED)
		goto unlock;
	/* ... and the host must not already have a mapping for it. */
	if (__host_check_page_state_range(phys, PAGE_SIZE, PKVM_NOPAGE))
		goto unlock;

	ret = 0;
	/*
	 * Both transitions below are expected to be infallible given the
	 * checks above; the memcache supplies any page-table pages needed
	 * for the remap.
	 */
	WARN_ON(kvm_pgtable_stage2_map(&vm->pgt, ipa, PAGE_SIZE, phys,
			pkvm_mkstate(KVM_PGTABLE_PROT_RWX, PKVM_PAGE_SHARED_OWNED),
			&vcpu->vcpu.arch.pkvm_memcache, 0));
	WARN_ON(__host_set_page_state_range(phys, PAGE_SIZE, PKVM_PAGE_SHARED_BORROWED));
unlock:
	guest_unlock_component(vm);
	host_unlock_component();

	return ret;
}
|
||||
|
||||
int __pkvm_host_unshare_hyp(u64 pfn)
|
||||
{
|
||||
u64 phys = hyp_pfn_to_phys(pfn);
|
||||
|
|
|
|||
|
|
@ -973,6 +973,58 @@ int __pkvm_finalize_teardown_vm(pkvm_handle_t handle)
|
|||
hyp_spin_unlock(&vm_table_lock);
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
 * Turn the in-progress MEM_SHARE hypercall into a synthetic stage-2
 * data abort at @ipa, so that the host faults the page in. The ELR is
 * wound back over the HVC instruction, meaning the guest re-issues the
 * hypercall once the host has dealt with the fault.
 *
 * Returns ARM_EXCEPTION_TRAP, suitable for use as an exit code.
 */
static u64 __pkvm_memshare_page_req(struct kvm_vcpu *vcpu, u64 ipa)
{
	u64 esr = (ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT) |
		  ESR_ELx_WNR | ESR_ELx_FSC_FAULT |
		  FIELD_PREP(ESR_ELx_FSC_LEVEL, 3);

	/* Synthesise a level-3 translation fault on write */
	vcpu->arch.fault.esr_el2 = esr;

	/* HPFAR_EL2 wants the IPA shifted down by 8 */
	vcpu->arch.fault.hpfar_el2 = (HPFAR_EL2_NS | (ipa >> 8)) & HPFAR_MASK;

	/* There's no meaningful faulting VA to report, so report zero */
	vcpu->arch.fault.far_el2 = 0;

	/* Step back one instruction so the HVC is replayed on return */
	write_sysreg(read_sysreg(elr_el2) - 4, elr_el2);

	return ARM_EXCEPTION_TRAP;
}
|
||||
|
||||
/*
 * Handle the MEM_SHARE hypercall from a protected guest.
 *
 * Returns true when the call has been fully handled at EL2 (the guest
 * is resumed with @ret), or false when control must go back to the
 * host — in which case *exit_code has been set up to deliver a
 * synthetic data abort so the page gets mapped first.
 *
 * Note: @ret is only written on success; for a misaligned IPA or an
 * unexpected sharing error, the caller's pre-initialised value is
 * returned to the guest unchanged.
 */
static bool pkvm_memshare_call(u64 *ret, struct kvm_vcpu *vcpu, u64 *exit_code)
{
	u64 ipa = smccc_get_arg1(vcpu);
	struct pkvm_hyp_vcpu *hyp_vcpu;

	/* Only page-aligned IPAs are acceptable */
	if (!PAGE_ALIGNED(ipa))
		return true;

	hyp_vcpu = container_of(vcpu, struct pkvm_hyp_vcpu, vcpu);
	switch (__pkvm_guest_share_host(hyp_vcpu, hyp_phys_to_pfn(ipa))) {
	case 0:
		ret[0] = SMCCC_RET_SUCCESS;
		break;
	case -ENOENT:
		/*
		 * The page isn't mapped at stage-2 yet: fake a data
		 * abort so it gets mapped, after which the guest will
		 * retry the hypercall.
		 */
		*exit_code = __pkvm_memshare_page_req(vcpu, ipa);
		return false;
	}

	return true;
}
|
||||
|
||||
/*
|
||||
* Handler for protected VM HVC calls.
|
||||
*
|
||||
|
|
@ -989,6 +1041,7 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
|
|||
case ARM_SMCCC_VENDOR_HYP_KVM_FEATURES_FUNC_ID:
|
||||
val[0] = BIT(ARM_SMCCC_KVM_FUNC_FEATURES);
|
||||
val[0] |= BIT(ARM_SMCCC_KVM_FUNC_HYP_MEMINFO);
|
||||
val[0] |= BIT(ARM_SMCCC_KVM_FUNC_MEM_SHARE);
|
||||
break;
|
||||
case ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID:
|
||||
if (smccc_get_arg1(vcpu) ||
|
||||
|
|
@ -999,6 +1052,14 @@ bool kvm_handle_pvm_hvc64(struct kvm_vcpu *vcpu, u64 *exit_code)
|
|||
|
||||
val[0] = PAGE_SIZE;
|
||||
break;
|
||||
case ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID:
|
||||
if (smccc_get_arg2(vcpu) ||
|
||||
smccc_get_arg3(vcpu)) {
|
||||
break;
|
||||
}
|
||||
|
||||
handled = pkvm_memshare_call(val, vcpu, exit_code);
|
||||
break;
|
||||
default:
|
||||
/* Punt everything else back to the host, for now. */
|
||||
handled = false;
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user