LoongArch: KVM: Compile switch.S directly into the kernel

If switch.S is compiled directly into the kernel, the address of the
kvm_exc_entry function is guaranteed to lie within the DMW memory area,
so there is no longer any need to perform a copy relocation of
kvm_exc_entry.

So this patch compiles switch.S directly into the kernel and then removes
the copy-relocation logic for the kvm_exc_entry function.

Cc: stable@vger.kernel.org
Signed-off-by: Xianglai Li <lixianglai@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
commit 5203012fa6 (parent 7e2c41bc62)
Author:    Xianglai Li <lixianglai@loongson.cn>
Date:      2026-05-04 09:00:37 +08:00
Committer: Huacai Chen <chenhuacai@loongson.cn>
6 changed files with 41 additions and 42 deletions

arch/loongarch/Kbuild

@@ -3,7 +3,7 @@ obj-y += mm/
obj-y += net/
obj-y += vdso/
-obj-$(CONFIG_KVM) += kvm/
+obj-$(subst m,y,$(CONFIG_KVM)) += kvm/
# for cleaning
subdir- += boot
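
A note on the Kbuild idiom above: $(subst m,y,$(CONFIG_KVM)) folds both
CONFIG_KVM=y and CONFIG_KVM=m into y, so the kvm/ directory is now entered
during the vmlinux build even when KVM itself is a module. A minimal
standalone GNU Make sketch of the substitution, using illustrative values
rather than the real kernel build machinery:

    # CONFIG_KVM would normally come from .config; assume a modular build.
    CONFIG_KVM := m
    obj-$(subst m,y,$(CONFIG_KVM)) += kvm/   # expands to: obj-y += kvm/

    all:
    	@echo 'obj-y = $(obj-y)'             # prints: obj-y = kvm/

This is what allows the kvm/ Makefile below to mark switch.o as obj-y:
switch.o is linked into the kernel proper, so its code sits at a kernel
address inside the DMW range, while the rest of KVM can still be built as
the kvm.ko module.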

arch/loongarch/include/asm/asm-prototypes.h

@@ -20,3 +20,23 @@ asmlinkage void noinstr __no_stack_protector ret_from_kernel_thread(struct task_
struct pt_regs *regs,
int (*fn)(void *),
void *fn_arg);
+struct kvm_run;
+struct kvm_vcpu;
+struct loongarch_fpu;
+void kvm_exc_entry(void);
+int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
+void kvm_save_fpu(struct loongarch_fpu *fpu);
+void kvm_restore_fpu(struct loongarch_fpu *fpu);
+#ifdef CONFIG_CPU_HAS_LSX
+void kvm_save_lsx(struct loongarch_fpu *fpu);
+void kvm_restore_lsx(struct loongarch_fpu *fpu);
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
+void kvm_save_lasx(struct loongarch_fpu *fpu);
+void kvm_restore_lasx(struct loongarch_fpu *fpu);
+#endif

arch/loongarch/include/asm/kvm_host.h

@@ -87,7 +87,6 @@ struct kvm_context {
struct kvm_world_switch {
int (*exc_entry)(void);
int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
-unsigned long page_order;
};
#define MAX_PGTABLE_LEVELS 4
@@ -359,8 +358,6 @@ void kvm_exc_entry(void);
int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern unsigned long vpid_mask;
-extern const unsigned long kvm_exception_size;
-extern const unsigned long kvm_enter_guest_size;
extern struct kvm_world_switch *kvm_loongarch_ops;
#define SW_GCSR (1 << 0)

arch/loongarch/kvm/Makefile

@@ -7,11 +7,12 @@ include $(srctree)/virt/kvm/Makefile.kvm
obj-$(CONFIG_KVM) += kvm.o
+obj-y += switch.o
kvm-y += exit.o
kvm-y += interrupt.o
kvm-y += main.o
kvm-y += mmu.o
-kvm-y += switch.o
kvm-y += timer.o
kvm-y += tlb.o
kvm-y += vcpu.o
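
A sketch of the resulting object placement, assuming CONFIG_KVM=m (with
CONFIG_KVM=y everything is simply built in):

    obj-y += switch.o    # always linked into vmlinux
    obj-m += kvm.o       # what obj-$(CONFIG_KVM) expands to; becomes kvm.ko
    kvm-y += exit.o ...  # composite objects that make up kvm.o

Since switch.o is now built in while its callers may live in kvm.ko, the
entry points in switch.S are exported to the module via the
EXPORT_SYMBOL_FOR_KVM() annotations added in the switch.S hunks below.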

arch/loongarch/kvm/main.c

@@ -348,8 +348,7 @@ void kvm_arch_disable_virtualization_cpu(void)
static int kvm_loongarch_env_init(void)
{
-int cpu, order, ret;
-void *addr;
+int cpu, ret;
struct kvm_context *context;
vmcs = alloc_percpu(struct kvm_context);
@@ -365,30 +364,8 @@ static int kvm_loongarch_env_init(void)
return -ENOMEM;
}
-/*
- * PGD register is shared between root kernel and kvm hypervisor.
- * So world switch entry should be in DMW area rather than TLB area
- * to avoid page fault reenter.
- *
- * In future if hardware pagetable walking is supported, we won't
- * need to copy world switch code to DMW area.
- */
-order = get_order(kvm_exception_size + kvm_enter_guest_size);
-addr = (void *)__get_free_pages(GFP_KERNEL, order);
-if (!addr) {
-free_percpu(vmcs);
-vmcs = NULL;
-kfree(kvm_loongarch_ops);
-kvm_loongarch_ops = NULL;
-return -ENOMEM;
-}
-memcpy(addr, kvm_exc_entry, kvm_exception_size);
-memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
-flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
-kvm_loongarch_ops->exc_entry = addr;
-kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
-kvm_loongarch_ops->page_order = order;
+kvm_loongarch_ops->exc_entry = (void *)kvm_exc_entry;
+kvm_loongarch_ops->enter_guest = (void *)kvm_enter_guest;
vpid_mask = read_csr_gstat();
vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
@@ -428,16 +405,10 @@ static int kvm_loongarch_env_init(void)
static void kvm_loongarch_env_exit(void)
{
-unsigned long addr;
if (vmcs)
free_percpu(vmcs);
if (kvm_loongarch_ops) {
-if (kvm_loongarch_ops->exc_entry) {
-addr = (unsigned long)kvm_loongarch_ops->exc_entry;
-free_pages(addr, kvm_loongarch_ops->page_order);
-}
kfree(kvm_loongarch_ops);
}

arch/loongarch/kvm/switch.S

@@ -4,9 +4,11 @@
*/
#include <linux/linkage.h>
+#include <linux/kvm_types.h>
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/loongarch.h>
+#include <asm/page.h>
#include <asm/regdef.h>
#include <asm/unwind_hints.h>
@@ -100,8 +102,13 @@
* - is still in guest mode, such as pgd table/vmid registers etc,
* - will fix with hw page walk enabled in future
* load kvm_vcpu from reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS
+ *
+ * PGD register is shared between root kernel and kvm hypervisor.
+ * So world switch entry should be in DMW area rather than TLB area
+ * to avoid page fault re-enter.
*/
.text
+.p2align PAGE_SHIFT
.cfi_sections .debug_frame
SYM_CODE_START(kvm_exc_entry)
UNWIND_HINT_UNDEFINED
@@ -190,8 +197,8 @@ ret_to_host:
kvm_restore_host_gpr a2
jr ra
-SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
SYM_CODE_END(kvm_exc_entry)
+EXPORT_SYMBOL_FOR_KVM(kvm_exc_entry)
/*
* int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
@@ -215,8 +222,8 @@ SYM_FUNC_START(kvm_enter_guest)
/* Save kvm_vcpu to kscratch */
csrwr a1, KVM_VCPU_KS
kvm_switch_to_guest
-SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
SYM_FUNC_END(kvm_enter_guest)
+EXPORT_SYMBOL_FOR_KVM(kvm_enter_guest)
SYM_FUNC_START(kvm_save_fpu)
fpu_save_csr a0 t1
@@ -224,6 +231,7 @@ SYM_FUNC_START(kvm_save_fpu)
fpu_save_cc a0 t1 t2
jr ra
SYM_FUNC_END(kvm_save_fpu)
+EXPORT_SYMBOL_FOR_KVM(kvm_save_fpu)
SYM_FUNC_START(kvm_restore_fpu)
fpu_restore_double a0 t1
@@ -231,6 +239,7 @@ SYM_FUNC_START(kvm_restore_fpu)
fpu_restore_cc a0 t1 t2
jr ra
SYM_FUNC_END(kvm_restore_fpu)
+EXPORT_SYMBOL_FOR_KVM(kvm_restore_fpu)
#ifdef CONFIG_CPU_HAS_LSX
SYM_FUNC_START(kvm_save_lsx)
@@ -239,6 +248,7 @@ SYM_FUNC_START(kvm_save_lsx)
lsx_save_data a0 t1
jr ra
SYM_FUNC_END(kvm_save_lsx)
+EXPORT_SYMBOL_FOR_KVM(kvm_save_lsx)
SYM_FUNC_START(kvm_restore_lsx)
lsx_restore_data a0 t1
@@ -246,6 +256,7 @@ SYM_FUNC_START(kvm_restore_lsx)
fpu_restore_csr a0 t1 t2
jr ra
SYM_FUNC_END(kvm_restore_lsx)
+EXPORT_SYMBOL_FOR_KVM(kvm_restore_lsx)
#endif
#ifdef CONFIG_CPU_HAS_LASX
@@ -255,6 +266,7 @@ SYM_FUNC_START(kvm_save_lasx)
lasx_save_data a0 t1
jr ra
SYM_FUNC_END(kvm_save_lasx)
+EXPORT_SYMBOL_FOR_KVM(kvm_save_lasx)
SYM_FUNC_START(kvm_restore_lasx)
lasx_restore_data a0 t1
@@ -262,10 +274,8 @@ SYM_FUNC_START(kvm_restore_lasx)
fpu_restore_csr a0 t1 t2
jr ra
SYM_FUNC_END(kvm_restore_lasx)
+EXPORT_SYMBOL_FOR_KVM(kvm_restore_lasx)
#endif
-.section ".rodata"
-SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
-SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
#ifdef CONFIG_CPU_HAS_LBT
STACK_FRAME_NON_STANDARD kvm_restore_fpu