KVM: x86: Drop kvm_x86_ops.set_dr6() in favor of a new KVM_RUN flag
Instruct vendor code to load the guest's DR6 into hardware via a new
KVM_RUN flag, and remove kvm_x86_ops.set_dr6(), whose sole purpose was to
load vcpu->arch.dr6 into hardware when DR6 can be read/written directly
by the guest.

Note, TDX already WARNs on any run_flag being set, i.e. will yell if KVM
thinks DR6 needs to be reloaded.  TDX vCPUs force KVM_DEBUGREG_AUTO_SWITCH
and never clear the flag, i.e. should never observe KVM_RUN_LOAD_GUEST_DR6.

Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20250610232010.162191-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
commit 80c64c7afe
parent 2478b1b220
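The diff below wires the new flag through common x86 code and the SVM/VMX entry
paths.  As a rough, self-contained sketch of the pattern (toy types and names,
not the kernel's API; only the flag value mirrors KVM_RUN_LOAD_GUEST_DR6):
the generic caller sets a bit in run_flags instead of invoking a dedicated
set_dr6() callback, and the vendor entry path loads the DR6 snapshot only when
that bit is set.

/*
 * Toy illustration of the run_flags pattern adopted by this patch.
 * All types and helpers here are invented for the example.
 */
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define RUN_LOAD_GUEST_DR6	BIT(1)	/* mirrors KVM_RUN_LOAD_GUEST_DR6 */

struct toy_vcpu {
	unsigned long dr6;	/* the hypervisor's snapshot of guest DR6 */
	int dr_wont_exit;	/* stand-in for KVM_DEBUGREG_WONT_EXIT */
};

/* Vendor entry path: consume the flag right before "VM-Enter". */
static void toy_vcpu_run(struct toy_vcpu *vcpu, unsigned int run_flags)
{
	if (run_flags & RUN_LOAD_GUEST_DR6)
		printf("load guest DR6 = %#lx into hardware\n", vcpu->dr6);
	else
		printf("enter with a clean (active-low) DR6\n");
}

int main(void)
{
	struct toy_vcpu vcpu = { .dr6 = 0xffff0ff0, .dr_wont_exit = 1 };
	unsigned int run_flags = 0;

	/* Generic code: request the load instead of calling a set_dr6() hook. */
	if (vcpu.dr_wont_exit)
		run_flags |= RUN_LOAD_GUEST_DR6;

	toy_vcpu_run(&vcpu, run_flags);
	return 0;
}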
arch/x86/include/asm/kvm-x86-ops.h
@@ -49,7 +49,6 @@ KVM_X86_OP(set_idt)
 KVM_X86_OP(get_gdt)
 KVM_X86_OP(set_gdt)
 KVM_X86_OP(sync_dirty_debug_regs)
-KVM_X86_OP(set_dr6)
 KVM_X86_OP(set_dr7)
 KVM_X86_OP(cache_reg)
 KVM_X86_OP(get_rflags)
arch/x86/include/asm/kvm_host.h
@@ -1676,6 +1676,7 @@ static inline u16 kvm_lapic_irq_dest_mode(bool dest_mode_logical)
 
 enum kvm_x86_run_flags {
 	KVM_RUN_FORCE_IMMEDIATE_EXIT	= BIT(0),
+	KVM_RUN_LOAD_GUEST_DR6		= BIT(1),
 };
 
 struct kvm_x86_ops {
@@ -1728,7 +1729,6 @@ struct kvm_x86_ops {
 	void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
-	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
arch/x86/kvm/svm/svm.c
@@ -4438,10 +4438,13 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
 	svm_hv_update_vp_id(svm->vmcb, vcpu);
 
 	/*
-	 * Run with all-zero DR6 unless needed, so that we can get the exact cause
-	 * of a #DB.
+	 * Run with all-zero DR6 unless the guest can write DR6 freely, so that
+	 * KVM can get the exact cause of a #DB.  Note, loading guest DR6 from
+	 * KVM's snapshot is only necessary when DR accesses won't exit.
 	 */
-	if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
+	if (unlikely(run_flags & KVM_RUN_LOAD_GUEST_DR6))
+		svm_set_dr6(vcpu, vcpu->arch.dr6);
+	else if (likely(!(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)))
 		svm_set_dr6(vcpu, DR6_ACTIVE_LOW);
 
 	clgi();
@@ -5252,7 +5255,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.set_idt = svm_set_idt,
 	.get_gdt = svm_get_gdt,
 	.set_gdt = svm_set_gdt,
-	.set_dr6 = svm_set_dr6,
 	.set_dr7 = svm_set_dr7,
 	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
 	.cache_reg = svm_cache_reg,
arch/x86/kvm/vmx/main.c
@@ -489,14 +489,6 @@ static void vt_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 	vmx_set_gdt(vcpu, dt);
 }
 
-static void vt_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
-{
-	if (is_td_vcpu(vcpu))
-		return;
-
-	vmx_set_dr6(vcpu, val);
-}
-
 static void vt_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
 {
 	if (is_td_vcpu(vcpu))
@@ -943,7 +935,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 	.set_idt = vt_op(set_idt),
 	.get_gdt = vt_op(get_gdt),
 	.set_gdt = vt_op(set_gdt),
-	.set_dr6 = vt_op(set_dr6),
 	.set_dr7 = vt_op(set_dr7),
 	.sync_dirty_debug_regs = vt_op(sync_dirty_debug_regs),
 	.cache_reg = vt_op(cache_reg),
arch/x86/kvm/vmx/vmx.c
@@ -5606,12 +5606,6 @@ void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
 	set_debugreg(DR6_RESERVED, 6);
 }
 
-void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
-{
-	lockdep_assert_irqs_disabled();
-	set_debugreg(vcpu->arch.dr6, 6);
-}
-
 void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
 {
 	vmcs_writel(GUEST_DR7, val);
@@ -7370,6 +7364,9 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, u64 run_flags)
 		vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
 	vcpu->arch.regs_dirty = 0;
 
+	if (run_flags & KVM_RUN_LOAD_GUEST_DR6)
+		set_debugreg(vcpu->arch.dr6, 6);
+
 	/*
 	 * Refresh vmcs.HOST_CR3 if necessary.  This must be done immediately
 	 * prior to VM-Enter, as the kernel may load a new ASID (PCID) any time
arch/x86/kvm/x86.c
@@ -11046,7 +11046,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		set_debugreg(vcpu->arch.eff_db[3], 3);
 		/* When KVM_DEBUGREG_WONT_EXIT, dr6 is accessible in guest. */
 		if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT))
-			kvm_x86_call(set_dr6)(vcpu, vcpu->arch.dr6);
+			run_flags |= KVM_RUN_LOAD_GUEST_DR6;
 	} else if (unlikely(hw_breakpoint_active())) {
 		set_debugreg(0, 7);
 	}