ANDROID: KVM: arm64: deduplicate kvm_hyp_iommu

The conditional definition of the lock inside kvm_hyp_iommu results in
duplicate BTF types for all structures that directly or indirectly
mention kvm_hyp_iommu. However, an opaque type is needed to hide the
hypervisor-only spinlock from the kernel.
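
For reference, this is the layout before the change (removed in the
last hunk below). Hypervisor objects are built with
__KVM_NVHE_HYPERVISOR__ and kernel objects are not, so the two sides
encode different member types for the same struct, and BTF
deduplication cannot merge them:

  struct kvm_hyp_iommu {
  	struct kvm_power_domain power_domain;
  #ifdef __KVM_NVHE_HYPERVISOR__
  	hyp_spinlock_t lock;	/* hypervisor objects see this */
  #else
  	u32 unused;		/* kernel objects see this */
  #endif
  	bool power_is_off;
  	ANDROID_KABI_RESERVE(1);
  };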

Remove "ifdef __KVM_NVHE_HYPERVISOR__" and leave only opaque u32 type.
Use __GENKSYMS__ hack to hide this change from CRC generation, since the
change is ABI compatible.
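
This works because genksyms defines __GENKSYMS__ while parsing the
headers to compute symbol CRCs, so CRC generation still sees the old
"u32 unused" member while the compiler sees the renamed "u32 lock"
(same size and type, hence ABI compatible):

  #ifndef __GENKSYMS__
  	u32 lock; /* lock size verified in kvm_iommu_get_lock. */
  #else
  	u32 unused;
  #endif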

Add accessors for the lock to be used instead of the functions from
"nvhe/spinlock.h". Move the build-time size check there, right where
the opaque type is cast to the concrete type.
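
Call sites then switch mechanically from the raw spinlock helpers to
the accessors, for example:

  -	hyp_spin_lock(&iommu->lock);
  +	kvm_iommu_lock(iommu);
  ...
  -	hyp_spin_unlock(&iommu->lock);
  +	kvm_iommu_unlock(iommu);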

While adding the accessors it turned out that no code uses
hyp_spin_lock_init on iommu->lock, so also add an initializer to
kvm_iommu_init_device.

Bug: 350677978
Change-Id: I59a34f9613732d3382fa57d9e9088d2c84d0b732
Signed-off-by: Aleksei Vetrov <vvvvvv@google.com>

@@ -6,6 +6,7 @@
 #include <kvm/iommu.h>
 #include <linux/io-pgtable.h>
+#include <nvhe/spinlock.h>

 #if IS_ENABLED(CONFIG_ARM_SMMU_V3_PKVM)
 #include <linux/io-pgtable-arm.h>
@@ -72,6 +73,28 @@ struct kvm_iommu_paddr_cache {
 void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache);
+
+static inline hyp_spinlock_t *kvm_iommu_get_lock(struct kvm_hyp_iommu *iommu)
+{
+	/* See struct kvm_hyp_iommu */
+	BUILD_BUG_ON(sizeof(iommu->lock) != sizeof(hyp_spinlock_t));
+	return (hyp_spinlock_t *)(&iommu->lock);
+}
+
+static inline void kvm_iommu_lock_init(struct kvm_hyp_iommu *iommu)
+{
+	hyp_spin_lock_init(kvm_iommu_get_lock(iommu));
+}
+
+static inline void kvm_iommu_lock(struct kvm_hyp_iommu *iommu)
+{
+	hyp_spin_lock(kvm_iommu_get_lock(iommu));
+}
+
+static inline void kvm_iommu_unlock(struct kvm_hyp_iommu *iommu)
+{
+	hyp_spin_unlock(kvm_iommu_get_lock(iommu));
+}
+
 /**
  * struct kvm_iommu_ops - KVM iommu ops
  * @init: init the driver called once before the kernel de-privilege


@@ -471,13 +471,13 @@ static int iommu_power_on(struct kvm_power_domain *pd)
 	bool prev;
 	int ret;

-	hyp_spin_lock(&iommu->lock);
+	kvm_iommu_lock(iommu);
 	prev = iommu->power_is_off;
 	iommu->power_is_off = false;
 	ret = kvm_iommu_ops->resume ? kvm_iommu_ops->resume(iommu) : 0;
 	if (ret)
 		iommu->power_is_off = prev;
-	hyp_spin_unlock(&iommu->lock);
+	kvm_iommu_unlock(iommu);

 	return ret;
 }
@@ -488,13 +488,13 @@ static int iommu_power_off(struct kvm_power_domain *pd)
 	bool prev;
 	int ret;

-	hyp_spin_lock(&iommu->lock);
+	kvm_iommu_lock(iommu);
 	prev = iommu->power_is_off;
 	iommu->power_is_off = true;
 	ret = kvm_iommu_ops->suspend ? kvm_iommu_ops->suspend(iommu) : 0;
 	if (ret)
 		iommu->power_is_off = prev;
-	hyp_spin_unlock(&iommu->lock);
+	kvm_iommu_unlock(iommu);

 	return ret;
 }
@@ -505,8 +505,7 @@ static const struct kvm_power_domain_ops iommu_power_ops = {
 int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu)
 {
-	/* See struct kvm_hyp_iommu */
-	BUILD_BUG_ON(sizeof(u32) != sizeof(hyp_spinlock_t));
+	kvm_iommu_lock_init(iommu);

 	return pkvm_init_power_domain(&iommu->power_domain, &iommu_power_ops);
 }


@@ -551,13 +551,13 @@ static void smmu_tlb_flush_all(void *cookie)
 	hyp_read_lock(&smmu_domain->lock);
 	list_for_each_entry(iommu_node, &smmu_domain->iommu_list, list) {
 		smmu = to_smmu(iommu_node->iommu);
-		hyp_spin_lock(&smmu->iommu.lock);
+		kvm_iommu_lock(&smmu->iommu);
 		if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on) {
-			hyp_spin_unlock(&smmu->iommu.lock);
+			kvm_iommu_unlock(&smmu->iommu);
 			continue;
 		}
 		WARN_ON(smmu_send_cmd(smmu, &cmd));
-		hyp_spin_unlock(&smmu->iommu.lock);
+		kvm_iommu_unlock(&smmu->iommu);
 	}
 	hyp_read_unlock(&smmu_domain->lock);
 }
@@ -572,7 +572,7 @@ static int smmu_tlb_inv_range_smmu(struct hyp_arm_smmu_v3_device *smmu,
 	size_t inv_range = granule;
 	struct hyp_arm_smmu_v3_domain *smmu_domain = domain->priv;

-	hyp_spin_lock(&smmu->iommu.lock);
+	kvm_iommu_lock(&smmu->iommu);
 	if (smmu->iommu.power_is_off && smmu->caches_clean_on_power_on)
 		goto out_ret;
@@ -633,7 +633,7 @@ static int smmu_tlb_inv_range_smmu(struct hyp_arm_smmu_v3_device *smmu,
 	ret = smmu_sync_cmd(smmu);

 out_ret:
-	hyp_spin_unlock(&smmu->iommu.lock);
+	kvm_iommu_unlock(&smmu->iommu);
 	return ret;
 }
@@ -997,7 +997,7 @@ static int smmu_attach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
 	struct domain_iommu_node *iommu_node = NULL;

 	hyp_write_lock(&smmu_domain->lock);
-	hyp_spin_lock(&iommu->lock);
+	kvm_iommu_lock(iommu);
 	dst = smmu_get_ste_ptr(smmu, sid);
 	if (!dst)
 		goto out_unlock;
@@ -1087,7 +1087,7 @@ static int smmu_attach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
 out_unlock:
 	if (ret && iommu_node)
 		hyp_free(iommu_node);
-	hyp_spin_unlock(&iommu->lock);
+	kvm_iommu_unlock(iommu);
 	hyp_write_unlock(&smmu_domain->lock);
 	return ret;
 }
@@ -1103,7 +1103,7 @@ static int smmu_detach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
 	u64 *cd_table, *cd;

 	hyp_write_lock(&smmu_domain->lock);
-	hyp_spin_lock(&iommu->lock);
+	kvm_iommu_lock(iommu);
 	dst = smmu_get_ste_ptr(smmu, sid);
 	if (!dst)
 		goto out_unlock;
@@ -1145,7 +1145,7 @@ static int smmu_detach_dev(struct kvm_hyp_iommu *iommu, struct kvm_hyp_iommu_dom
 	smmu_put_ref_domain(smmu, smmu_domain);

 out_unlock:
-	hyp_spin_unlock(&iommu->lock);
+	kvm_iommu_unlock(iommu);
 	hyp_write_unlock(&smmu_domain->lock);
 	return ret;
 }


@@ -5,11 +5,6 @@
 #include <asm/kvm_host.h>
 #include <kvm/power_domain.h>
 #include <linux/io-pgtable.h>
-#ifdef __KVM_NVHE_HYPERVISOR__
-#include <nvhe/spinlock.h>
-#endif
-
-#define HYP_SPINLOCK_SIZE 4

 /*
  * Domain ID for identity mapped domain that the host can attach
@@ -31,10 +26,10 @@
  */
 struct kvm_hyp_iommu {
 	struct kvm_power_domain power_domain;
-#ifdef __KVM_NVHE_HYPERVISOR__
-	hyp_spinlock_t lock;
+#ifndef __GENKSYMS__
+	u32 lock; /* lock size verified in kvm_iommu_get_lock. */
 #else
-	u32 unused; /* HYP_SPINLOCK_SIZE verified at build time. */
+	u32 unused;
 #endif
 	bool power_is_off;
 	ANDROID_KABI_RESERVE(1);