mirror of
git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-22 23:13:01 +02:00
Merge branch 'kvm-userspace-hypercall' into HEAD
Make the completion of hypercalls go through the complete_hypercall function pointer argument, regardless of whether the hypercall exits to userspace. Previously, the code assumed that KVM_HC_MAP_GPA_RANGE specifically went to userspace while all the others did not; the new code need not special-case KVM_HC_MAP_GPA_RANGE and in fact does not care at all whether there was an exit to userspace.
This commit is contained in:
commit
3eba032bb7
|
@ -2203,12 +2203,6 @@ static inline void kvm_clear_apicv_inhibit(struct kvm *kvm,
|
|||
kvm_set_or_clear_apicv_inhibit(kvm, reason, false);
|
||||
}
|
||||
|
||||
unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
|
||||
unsigned long a0, unsigned long a1,
|
||||
unsigned long a2, unsigned long a3,
|
||||
int op_64_bit, int cpl);
|
||||
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
|
||||
|
||||
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
|
||||
void *insn, int insn_len);
|
||||
void kvm_mmu_print_sptes(struct kvm_vcpu *vcpu, gpa_t gpa, const char *msg);
|
||||
|
|
|
@ -3627,13 +3627,20 @@ static int snp_begin_psc_msr(struct vcpu_svm *svm, u64 ghcb_msr)
|
|||
return 1; /* resume guest */
|
||||
}
|
||||
|
||||
if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) {
|
||||
if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) {
|
||||
set_ghcb_msr(svm, GHCB_MSR_PSC_RESP_ERROR);
|
||||
return 1; /* resume guest */
|
||||
}
|
||||
|
||||
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
|
||||
vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
|
||||
/*
|
||||
* In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
|
||||
* assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
|
||||
* it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
|
||||
* vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU.
|
||||
*/
|
||||
vcpu->run->hypercall.ret = 0;
|
||||
vcpu->run->hypercall.args[0] = gpa;
|
||||
vcpu->run->hypercall.args[1] = 1;
|
||||
vcpu->run->hypercall.args[2] = (op == SNP_PAGE_STATE_PRIVATE)
|
||||
|
@ -3710,7 +3717,7 @@ static int snp_begin_psc(struct vcpu_svm *svm, struct psc_buffer *psc)
|
|||
bool huge;
|
||||
u64 gfn;
|
||||
|
||||
if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) {
|
||||
if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE)) {
|
||||
snp_complete_psc(svm, VMGEXIT_PSC_ERROR_GENERIC);
|
||||
return 1;
|
||||
}
|
||||
|
@ -3797,6 +3804,13 @@ next_range:
|
|||
case VMGEXIT_PSC_OP_SHARED:
|
||||
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
|
||||
vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
|
||||
/*
|
||||
* In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
|
||||
* assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
|
||||
* it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
|
||||
* vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU.
|
||||
*/
|
||||
vcpu->run->hypercall.ret = 0;
|
||||
vcpu->run->hypercall.args[0] = gfn_to_gpa(gfn);
|
||||
vcpu->run->hypercall.args[1] = npages;
|
||||
vcpu->run->hypercall.args[2] = entry_start.operation == VMGEXIT_PSC_OP_PRIVATE
|
||||
|
|
|
@ -9997,17 +9997,19 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu)
|
|||
if (!is_64_bit_hypercall(vcpu))
|
||||
ret = (u32)ret;
|
||||
kvm_rax_write(vcpu, ret);
|
||||
++vcpu->stat.hypercalls;
|
||||
return kvm_skip_emulated_instruction(vcpu);
|
||||
}
|
||||
|
||||
unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
|
||||
int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
|
||||
unsigned long a0, unsigned long a1,
|
||||
unsigned long a2, unsigned long a3,
|
||||
int op_64_bit, int cpl)
|
||||
int op_64_bit, int cpl,
|
||||
int (*complete_hypercall)(struct kvm_vcpu *))
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
++vcpu->stat.hypercalls;
|
||||
|
||||
trace_kvm_hypercall(nr, a0, a1, a2, a3);
|
||||
|
||||
if (!op_64_bit) {
|
||||
|
@ -10059,7 +10061,7 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
|
|||
u64 gpa = a0, npages = a1, attrs = a2;
|
||||
|
||||
ret = -KVM_ENOSYS;
|
||||
if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE)))
|
||||
if (!user_exit_on_hypercall(vcpu->kvm, KVM_HC_MAP_GPA_RANGE))
|
||||
break;
|
||||
|
||||
if (!PAGE_ALIGNED(gpa) || !npages ||
|
||||
|
@ -10070,6 +10072,13 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
|
|||
|
||||
vcpu->run->exit_reason = KVM_EXIT_HYPERCALL;
|
||||
vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE;
|
||||
/*
|
||||
* In principle this should have been -KVM_ENOSYS, but userspace (QEMU <=9.2)
|
||||
* assumed that vcpu->run->hypercall.ret is never changed by KVM and thus that
|
||||
* it was always zero on KVM_EXIT_HYPERCALL. Since KVM is now overwriting
|
||||
* vcpu->run->hypercall.ret, ensuring that it is zero to not break QEMU.
|
||||
*/
|
||||
vcpu->run->hypercall.ret = 0;
|
||||
vcpu->run->hypercall.args[0] = gpa;
|
||||
vcpu->run->hypercall.args[1] = npages;
|
||||
vcpu->run->hypercall.args[2] = attrs;
|
||||
|
@ -10078,8 +10087,7 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
|
|||
vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE;
|
||||
|
||||
WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ);
|
||||
vcpu->arch.complete_userspace_io = complete_hypercall_exit;
|
||||
/* stat is incremented on completion. */
|
||||
vcpu->arch.complete_userspace_io = complete_hypercall;
|
||||
return 0;
|
||||
}
|
||||
default:
|
||||
|
@ -10088,41 +10096,23 @@ unsigned long __kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
|
|||
}
|
||||
|
||||
out:
|
||||
++vcpu->stat.hypercalls;
|
||||
return ret;
|
||||
vcpu->run->hypercall.ret = ret;
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__kvm_emulate_hypercall);
|
||||
EXPORT_SYMBOL_GPL(____kvm_emulate_hypercall);
|
||||
|
||||
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long nr, a0, a1, a2, a3, ret;
|
||||
int op_64_bit;
|
||||
int cpl;
|
||||
|
||||
if (kvm_xen_hypercall_enabled(vcpu->kvm))
|
||||
return kvm_xen_hypercall(vcpu);
|
||||
|
||||
if (kvm_hv_hypercall_enabled(vcpu))
|
||||
return kvm_hv_hypercall(vcpu);
|
||||
|
||||
nr = kvm_rax_read(vcpu);
|
||||
a0 = kvm_rbx_read(vcpu);
|
||||
a1 = kvm_rcx_read(vcpu);
|
||||
a2 = kvm_rdx_read(vcpu);
|
||||
a3 = kvm_rsi_read(vcpu);
|
||||
op_64_bit = is_64_bit_hypercall(vcpu);
|
||||
cpl = kvm_x86_call(get_cpl)(vcpu);
|
||||
|
||||
ret = __kvm_emulate_hypercall(vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl);
|
||||
if (nr == KVM_HC_MAP_GPA_RANGE && !ret)
|
||||
/* MAP_GPA tosses the request to the user space. */
|
||||
return 0;
|
||||
|
||||
if (!op_64_bit)
|
||||
ret = (u32)ret;
|
||||
kvm_rax_write(vcpu, ret);
|
||||
|
||||
return kvm_skip_emulated_instruction(vcpu);
|
||||
return __kvm_emulate_hypercall(vcpu, rax, rbx, rcx, rdx, rsi,
|
||||
is_64_bit_hypercall(vcpu),
|
||||
kvm_x86_call(get_cpl)(vcpu),
|
||||
complete_hypercall_exit);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
|
||||
|
||||
|
|
|
@ -616,4 +616,32 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
|
|||
unsigned int port, void *data, unsigned int count,
|
||||
int in);
|
||||
|
||||
static inline bool user_exit_on_hypercall(struct kvm *kvm, unsigned long hc_nr)
|
||||
{
|
||||
return kvm->arch.hypercall_exit_enabled & BIT(hc_nr);
|
||||
}
|
||||
|
||||
int ____kvm_emulate_hypercall(struct kvm_vcpu *vcpu, unsigned long nr,
|
||||
unsigned long a0, unsigned long a1,
|
||||
unsigned long a2, unsigned long a3,
|
||||
int op_64_bit, int cpl,
|
||||
int (*complete_hypercall)(struct kvm_vcpu *));
|
||||
|
||||
#define __kvm_emulate_hypercall(_vcpu, nr, a0, a1, a2, a3, op_64_bit, cpl, complete_hypercall) \
|
||||
({ \
|
||||
int __ret; \
|
||||
\
|
||||
__ret = ____kvm_emulate_hypercall(_vcpu, \
|
||||
kvm_##nr##_read(_vcpu), kvm_##a0##_read(_vcpu), \
|
||||
kvm_##a1##_read(_vcpu), kvm_##a2##_read(_vcpu), \
|
||||
kvm_##a3##_read(_vcpu), op_64_bit, cpl, \
|
||||
complete_hypercall); \
|
||||
\
|
||||
if (__ret > 0) \
|
||||
__ret = complete_hypercall(_vcpu); \
|
||||
__ret; \
|
||||
})
|
||||
|
||||
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
|
||||
|
||||
#endif
|
||||
|
|
Loading…
Reference in New Issue
Block a user