Merge branch kvm-arm64/misc-6.18 into kvmarm-master/next

* kvm-arm64/misc-6.18:
  : .
  : .
  : Misc improvements and bug fixes:
  :
  : - Fix XN handling in the S2 page table dumper
  :   (20250809135356.1003520-1-r09922117@csie.ntu.edu.tw)
  :
  : - Fix sanity checks for huge mappings with pKVM running np guests
  :   (20250815162655.121108-1-ben.horgan@arm.com)
  :
  : - Fix use of TRBE when KVM is disabled and Linux is running under
  :   a lesser hypervisor (20250902-etm_crash-v2-1-aa9713a7306b@oss.qualcomm.com)
  :
  : - Fix out of date MTE-related comments (20250915155234.196288-1-alexandru.elisei@arm.com)
  :
  : - Fix PSCI BE support when running an NV guest (20250916161103.1040727-1-maz@kernel.org)
  :
  : - Fix page reference leak when refusing to map a page due to mismatched attributes
  :   (20250917130737.2139403-1-tabba@google.com)
  :
  : - Add trap handling for PMSDSFR_EL1
  :   (20250901-james-perf-feat_spe_eft-v8-7-2e2738f24559@linaro.org)
  :
  : - Add advertisement of FEAT_LSFE (Large System Float Extension)
  :   (20250918-arm64-lsfe-v4-1-0abc712101c7@kernel.org)
  : .
  KVM: arm64: Expose FEAT_LSFE to guests
  KVM: arm64: Add trap configs for PMSDSFR_EL1
  KVM: arm64: Fix page leak in user_mem_abort()
  KVM: arm64: Fix kvm_vcpu_{set,is}_be() to deal with EL2 state
  KVM: arm64: Update stale comment for sanitise_mte_tags()
  KVM: arm64: Return early from trace helpers when KVM isn't available
  KVM: arm64: Fix debug checking for np-guests using huge mappings
  KVM: arm64: ptdump: Don't test PTE_VALID alongside other attributes

Signed-off-by: Marc Zyngier <maz@kernel.org>

commit 181ce6b01a
Author: Marc Zyngier <maz@kernel.org>
Date:   2025-09-20 12:26:29 +01:00
8 changed files with 57 additions and 38 deletions


@@ -525,21 +525,29 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 	if (vcpu_mode_is_32bit(vcpu)) {
 		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
 	} else {
-		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
+		enum vcpu_sysreg r;
+		u64 sctlr;
+
+		r = vcpu_has_nv(vcpu) ? SCTLR_EL2 : SCTLR_EL1;
+
+		sctlr = vcpu_read_sys_reg(vcpu, r);
 		sctlr |= SCTLR_ELx_EE;
-		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
+		vcpu_write_sys_reg(vcpu, sctlr, r);
 	}
 }
 
 static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
 {
+	enum vcpu_sysreg r;
+	u64 bit;
+
 	if (vcpu_mode_is_32bit(vcpu))
 		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);
 
-	if (vcpu_mode_priv(vcpu))
-		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
-	else
-		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
+	r = is_hyp_ctxt(vcpu) ? SCTLR_EL2 : SCTLR_EL1;
+	bit = vcpu_mode_priv(vcpu) ? SCTLR_ELx_EE : SCTLR_EL1_E0E;
+
+	return vcpu_read_sys_reg(vcpu, r) & bit;
 }
 
 static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
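
For readers less familiar with the SCTLR layout, here is a minimal standalone sketch (plain C, not kernel code) of the selection logic the new kvm_vcpu_is_be() uses: which SCTLR to read depends on whether the vCPU is in a hyp (EL2) context, and which bit to test depends on whether the access is privileged. Only the EE (bit 25) and E0E (bit 24) positions come from the Arm architecture; every other name below is invented for illustration.

/*
 * Standalone sketch, not kernel code: the function, parameters and the
 * main() harness are invented; only the EE/E0E bit positions are real.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCTLR_ELx_EE	(UINT64_C(1) << 25)	/* endianness of EL1/EL2 data accesses */
#define SCTLR_EL1_E0E	(UINT64_C(1) << 24)	/* endianness of EL0 data accesses */

static bool is_be_sketch(bool hyp_ctxt, bool privileged,
			 uint64_t sctlr_el1, uint64_t sctlr_el2)
{
	uint64_t sctlr = hyp_ctxt ? sctlr_el2 : sctlr_el1;	/* pick the live SCTLR */
	uint64_t bit = privileged ? SCTLR_ELx_EE : SCTLR_EL1_E0E;

	return sctlr & bit;
}

int main(void)
{
	/* big-endian EL2 state is only visible when running in a hyp context */
	printf("%d\n", is_be_sketch(true, true, 0, SCTLR_ELx_EE));	/* prints 1 */
	printf("%d\n", is_be_sketch(false, true, 0, SCTLR_ELx_EE));	/* prints 0 */
	return 0;
}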


@@ -94,6 +94,8 @@
 #define VNCR_PMSICR_EL1		0x838
 #define VNCR_PMSIRR_EL1		0x840
 #define VNCR_PMSLATFR_EL1	0x848
+#define VNCR_PMSNEVFR_EL1	0x850
+#define VNCR_PMSDSFR_EL1	0x858
 #define VNCR_TRFCR_EL1		0x880
 #define VNCR_MPAM1_EL1		0x900
 #define VNCR_MPAMHCR_EL2	0x930


@@ -233,29 +233,29 @@ void kvm_debug_handle_oslar(struct kvm_vcpu *vcpu, u64 val)
 	preempt_enable();
 }
 
+static bool skip_trbe_access(bool skip_condition)
+{
+	return (WARN_ON_ONCE(preemptible()) || skip_condition ||
+		is_protected_kvm_enabled() || !is_kvm_arm_initialised());
+}
+
 void kvm_enable_trbe(void)
 {
-	if (has_vhe() || is_protected_kvm_enabled() ||
-	    WARN_ON_ONCE(preemptible()))
-		return;
-
-	host_data_set_flag(TRBE_ENABLED);
+	if (!skip_trbe_access(has_vhe()))
+		host_data_set_flag(TRBE_ENABLED);
 }
 EXPORT_SYMBOL_GPL(kvm_enable_trbe);
 
 void kvm_disable_trbe(void)
 {
-	if (has_vhe() || is_protected_kvm_enabled() ||
-	    WARN_ON_ONCE(preemptible()))
-		return;
-
-	host_data_clear_flag(TRBE_ENABLED);
+	if (!skip_trbe_access(has_vhe()))
+		host_data_clear_flag(TRBE_ENABLED);
 }
 EXPORT_SYMBOL_GPL(kvm_disable_trbe);
 
 void kvm_tracing_set_el1_configuration(u64 trfcr_while_in_guest)
 {
-	if (is_protected_kvm_enabled() || WARN_ON_ONCE(preemptible()))
+	if (skip_trbe_access(false))
 		return;
 
 	if (has_vhe()) {

@@ -1185,6 +1185,7 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = {
 	SR_TRAP(SYS_PMSIRR_EL1,		CGT_MDCR_TPMS),
 	SR_TRAP(SYS_PMSLATFR_EL1,	CGT_MDCR_TPMS),
 	SR_TRAP(SYS_PMSNEVFR_EL1,	CGT_MDCR_TPMS),
+	SR_TRAP(SYS_PMSDSFR_EL1,	CGT_MDCR_TPMS),
 	SR_TRAP(SYS_TRFCR_EL1,		CGT_MDCR_TTRF),
 	SR_TRAP(SYS_TRBBASER_EL1,	CGT_MDCR_E2TB),
 	SR_TRAP(SYS_TRBLIMITR_EL1,	CGT_MDCR_E2TB),


@@ -1010,9 +1010,12 @@ static int __check_host_shared_guest(struct pkvm_hyp_vm *vm, u64 *__phys, u64 ip
 		return ret;
 	if (!kvm_pte_valid(pte))
 		return -ENOENT;
-	if (kvm_granule_size(level) != size)
+	if (size && kvm_granule_size(level) != size)
 		return -E2BIG;
 
+	if (!size)
+		size = kvm_granule_size(level);
+
 	state = guest_get_page_state(pte, ipa);
 	if (state != PKVM_PAGE_SHARED_BORROWED)
 		return -EPERM;
@@ -1100,7 +1103,7 @@ int __pkvm_host_relax_perms_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu, enum kvm_
 	if (prot & ~KVM_PGTABLE_PROT_RWX)
 		return -EINVAL;
 
-	assert_host_shared_guest(vm, ipa, PAGE_SIZE);
+	assert_host_shared_guest(vm, ipa, 0);
 	guest_lock_component(vm);
 	ret = kvm_pgtable_stage2_relax_perms(&vm->pgt, ipa, prot, 0);
 	guest_unlock_component(vm);
@@ -1156,7 +1159,7 @@ int __pkvm_host_mkyoung_guest(u64 gfn, struct pkvm_hyp_vcpu *vcpu)
 	if (pkvm_hyp_vm_is_protected(vm))
 		return -EPERM;
 
-	assert_host_shared_guest(vm, ipa, PAGE_SIZE);
+	assert_host_shared_guest(vm, ipa, 0);
 	guest_lock_component(vm);
 	kvm_pgtable_stage2_mkyoung(&vm->pgt, ipa, 0);
 	guest_unlock_component(vm);


@@ -1459,11 +1459,8 @@ static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva)
  * able to see the page's tags and therefore they must be initialised first. If
  * PG_mte_tagged is set, tags have already been initialised.
  *
- * The race in the test/set of the PG_mte_tagged flag is handled by:
- * - preventing VM_SHARED mappings in a memslot with MTE preventing two VMs
- *   racing to santise the same page
- * - mmap_lock protects between a VM faulting a page in and the VMM performing
- *   an mprotect() to add VM_MTE
+ * Must be called with kvm->mmu_lock held to ensure the memory remains mapped
+ * while the tags are zeroed.
  */
 static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn,
 			      unsigned long size)
@@ -1706,7 +1703,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		 * cache maintenance.
 		 */
 		if (!kvm_supports_cacheable_pfnmap())
-			return -EFAULT;
+			ret = -EFAULT;
 	} else {
 		/*
 		 * If the page was identified as device early by looking at
@@ -1729,7 +1726,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	}
 
 	if (exec_fault && s2_force_noncacheable)
-		return -ENOEXEC;
+		ret = -ENOEXEC;
+
+	if (ret) {
+		kvm_release_page_unused(page);
+		return ret;
+	}
 
 	/*
 	 * Potentially reduce shadow S2 permissions to match the guest's own
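
The user_mem_abort() hunk above switches to a common error-path pattern: once a page reference is held, failure conditions record an error code and fall through to a single release point instead of returning directly. Below is a minimal standalone sketch of that pattern; the fake_page type, its helpers and main() are invented for illustration and are not the KVM API.

/*
 * Standalone sketch of the "record the error, release once" pattern.
 * Everything here is made up; only the shape of the control flow matters.
 */
#include <errno.h>
#include <stdio.h>

struct fake_page { int refcount; };

static void fake_get(struct fake_page *p) { p->refcount++; }
static void fake_put(struct fake_page *p) { p->refcount--; }

static int fake_map(struct fake_page *p, int cacheable, int exec_fault)
{
	int ret = 0;

	fake_get(p);

	if (!cacheable)
		ret = -EFAULT;		/* record the error, keep the reference */
	if (exec_fault && !cacheable)
		ret = -ENOEXEC;

	if (ret) {
		fake_put(p);		/* single release point on error */
		return ret;
	}

	/* ...mapping would happen here... */
	fake_put(p);
	return 0;
}

int main(void)
{
	struct fake_page pg = { 0 };

	fake_map(&pg, 0, 0);
	printf("refcount after failed map: %d\n", pg.refcount);	/* 0, no leak */
	return 0;
}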


@@ -32,23 +32,23 @@ static const struct ptdump_prot_bits stage2_pte_bits[] = {
 		.set	= " ",
 		.clear	= "F",
 	}, {
-		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID,
-		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID,
+		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R,
+		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R,
 		.set	= "R",
 		.clear	= " ",
 	}, {
-		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID,
-		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID,
+		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
+		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
 		.set	= "W",
 		.clear	= " ",
 	}, {
-		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN | PTE_VALID,
-		.val	= PTE_VALID,
-		.set	= " ",
-		.clear	= "X",
+		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
+		.val	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
+		.set	= "NX",
+		.clear	= "x ",
 	}, {
-		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID,
-		.val	= KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID,
+		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_AF,
+		.val	= KVM_PTE_LEAF_ATTR_LO_S2_AF,
 		.set	= "AF",
 		.clear	= " ",
 	}, {
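
For context, descriptor tables of this kind work by bit matching: an entry's "set" string is printed when (pte & mask) == val, which is why folding PTE_VALID into every mask tied each attribute's output to the valid bit. A minimal standalone sketch of that matching scheme follows; the bit values and strings are invented and are not the kernel's.

/*
 * Standalone sketch of mask/val attribute decoding. The attribute bits and
 * strings are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

struct prot_bits { uint64_t mask, val; const char *set, *clear; };

#define ATTR_R	(UINT64_C(1) << 0)
#define ATTR_W	(UINT64_C(1) << 1)
#define ATTR_XN	(UINT64_C(1) << 2)

static const struct prot_bits bits[] = {
	{ ATTR_R,  ATTR_R,  "R",  " "  },
	{ ATTR_W,  ATTR_W,  "W",  " "  },
	{ ATTR_XN, ATTR_XN, "NX", "x " },
};

static void decode(uint64_t pte)
{
	for (unsigned int i = 0; i < sizeof(bits) / sizeof(bits[0]); i++)
		fputs((pte & bits[i].mask) == bits[i].val ?
		      bits[i].set : bits[i].clear, stdout);
	putchar('\n');
}

int main(void)
{
	decode(ATTR_R | ATTR_W);	/* prints "RWx " */
	decode(ATTR_R | ATTR_XN);	/* prints "R NX" */
	return 0;
}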


@@ -1757,7 +1757,8 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 		val &= ~ID_AA64ISAR2_EL1_WFxT;
 		break;
 	case SYS_ID_AA64ISAR3_EL1:
-		val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
+		val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_LSFE |
+		       ID_AA64ISAR3_EL1_FAMINMAX;
 		break;
 	case SYS_ID_AA64MMFR2_EL1:
 		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
@@ -3179,6 +3180,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 					ID_AA64ISAR2_EL1_APA3 |
 					ID_AA64ISAR2_EL1_GPA3)),
 	ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
+				       ID_AA64ISAR3_EL1_LSFE |
 				       ID_AA64ISAR3_EL1_FAMINMAX)),
 	ID_UNALLOCATED(6,4),
 	ID_UNALLOCATED(6,5),
@@ -3274,6 +3276,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
 	{ SYS_DESC(SYS_PMBPTR_EL1), undef_access },
 	{ SYS_DESC(SYS_PMBSR_EL1), undef_access },
+	{ SYS_DESC(SYS_PMSDSFR_EL1), undef_access },
 	/* PMBIDR_EL1 is not trapped */
 
 	{ PMU_SYS_REG(PMINTENSET_EL1),