mirror of
git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-22 23:13:01 +02:00
KVM: x86/mmu: Add an is_mirror member for union kvm_mmu_page_role
Introduce an "is_mirror" member to the kvm_mmu_page_role union to identify SPTEs associated with the mirrored EPT. The TDX module maintains the private half of the EPT mapped in the TD in its protected memory. KVM keeps a copy of the private GPAs in a mirrored EPT tree within host memory. This "is_mirror" attribute enables vCPUs to find and get the root page of mirrored EPT from the MMU root list for a guest TD. This also allows KVM MMU code to detect changes in mirrored EPT according to the "is_mirror" mmu page role and propagate the changes to the private EPT managed by the TDX module. Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com> Signed-off-by: Rick Edgecombe <rick.p.edgecombe@intel.com> Message-ID: <20240718211230.1492011-6-rick.p.edgecombe@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
3a4eb364a4
commit
6961ab0bae
|
@@ -313,10 +313,11 @@ struct kvm_kernel_irq_routing_entry;
  * the number of unique SPs that can theoretically be created is 2^n, where n
  * is the number of bits that are used to compute the role.
  *
- * But, even though there are 19 bits in the mask below, not all combinations
+ * But, even though there are 20 bits in the mask below, not all combinations
  * of modes and flags are possible:
  *
- * - invalid shadow pages are not accounted, so the bits are effectively 18
+ * - invalid shadow pages are not accounted, mirror pages are not shadowed,
+ *   so the bits are effectively 18.
  *
  * - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
  *   execonly and ad_disabled are only used for nested EPT which has
@@ -349,7 +350,8 @@ union kvm_mmu_page_role {
 		unsigned ad_disabled:1;
 		unsigned guest_mode:1;
 		unsigned passthrough:1;
-		unsigned :5;
+		unsigned is_mirror:1;
+		unsigned :4;
 
 		/*
 		 * This is left at the top of the word so that
@@ -157,6 +157,11 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
 	return kvm_mmu_role_as_id(sp->role);
 }
 
+static inline bool is_mirror_sp(const struct kvm_mmu_page *sp)
+{
+	return sp->role.is_mirror;
+}
+
 static inline void kvm_mmu_alloc_external_spt(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	/*
@@ -276,6 +276,11 @@ static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
 	return spte_to_child_sp(root);
 }
 
+static inline bool is_mirror_sptep(tdp_ptep_t sptep)
+{
+	return is_mirror_sp(sptep_to_sp(rcu_dereference(sptep)));
+}
+
 static inline bool is_mmio_spte(struct kvm *kvm, u64 spte)
 {
 	return (spte & shadow_mmio_mask) == kvm->arch.shadow_mmio_value &&
Loading…
Reference in New Issue
Block a user