ANDROID: KVM: arm64: iommu: Allow driver to flush cached refcount

commit "a737b7d0e721 (ANDROID: KVM: arm64: iommu: Reduce the logic in
generic code)" move unmap_walk logic to the SMMUv3 driver to abstract
the unmap operation.

When that logic was moved, a WARN_ON was added instead of exposing
kvm_iommu_flush_unmap_cache(), on the assumption that it is enough to
limit the pgcount passed into the driver to KVM_IOMMU_PADDR_CACHE_MAX.
As it turns out, the IOMMU layer in the kernel can map pages at page
level and unmap them at block level, so unmapping a single block that
was mapped page-by-page expands to 512 entries in the cache.
So, we need to expose this function to the driver.
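
For scale, an illustration (not from the patch; the usual 4KB granule
and 2MB block sizes are assumed):

    /* A 2MB block at 4KB granule covers 512 leaf entries. */
    #define SZ_4K             0x1000UL
    #define SZ_2M             0x200000UL
    #define ENTRIES_PER_BLOCK (SZ_2M / SZ_4K)   /* == 512 */

    /*
     * A region mapped page-by-page but unmapped as one block makes the
     * unmap walker report all 512 (paddr, size) pairs in a single call,
     * overflowing a cache of KVM_IOMMU_PADDR_CACHE_MAX entries.
     */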

Bug: 277989609
Bug: 278749606

Change-Id: Ic3815bdf2f96fd086a7b7c5c5a2ff32478fbf2d3
Signed-off-by: Mostafa Saleh <smostafa@google.com>
6 changed files with 17 additions and 6 deletions


@@ -12,6 +12,7 @@ typedef void (*dyn_hcall_t)(struct user_pt_regs *);
struct kvm_hyp_iommu;
struct iommu_iotlb_gather;
struct kvm_hyp_iommu_domain;
+struct kvm_iommu_paddr_cache;
#ifdef CONFIG_MODULES
enum pkvm_psci_notification {
@@ -213,7 +214,7 @@ struct pkvm_module_ops {
void (*iommu_reclaim_pages_atomic)(void *p, u8 order);
int (*iommu_snapshot_host_stage2)(struct kvm_hyp_iommu_domain *domain);
int (*hyp_smp_processor_id)(void);
-ANDROID_KABI_RESERVE(1);
+ANDROID_KABI_USE(1, void (*iommu_flush_unmap_cache)(struct kvm_iommu_paddr_cache *cache));
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);
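
ANDROID_KABI_USE(1, ...) burns the first reserved slot for the new
callback so struct pkvm_module_ops keeps its size and layout for the
KABI. A simplified sketch of what the helpers expand to (an assumption
about include/linux/android_kabi.h, not part of this diff):

    /* Reserved slot: a padding u64 named after its number. */
    #define ANDROID_KABI_RESERVE(n)    u64 android_kabi_reserved##n

    /* Reuse: the new member shares storage with the old padding, so the
     * offsets of all following members are unchanged. */
    #define ANDROID_KABI_USE(n, _new)             \
            union {                               \
                    u64 android_kabi_reserved##n; \
                    _new;                         \
            }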


@@ -70,6 +70,8 @@ struct kvm_iommu_paddr_cache {
size_t pgsize[KVM_IOMMU_PADDR_CACHE_MAX];
};
+void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache);
/**
* struct kvm_iommu_ops - KVM iommu ops
* @init: init the driver called once before the kernel de-privilege
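
Only the tail of struct kvm_iommu_paddr_cache is visible in this hunk;
judging from the fields used by the walker below, its full shape is
roughly the following (a sketch; the type of ptr is assumed):

    struct kvm_iommu_paddr_cache {
            unsigned short  ptr;    /* number of valid entries */
            u64             paddr[KVM_IOMMU_PADDR_CACHE_MAX];
            size_t          pgsize[KVM_IOMMU_PADDR_CACHE_MAX];
    };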


@@ -380,7 +380,7 @@ void kvm_iommu_iotlb_gather_add_page(struct kvm_hyp_iommu_domain *domain,
kvm_iommu_iotlb_gather_add_range(gather, iova, size);
}
-static void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache)
+void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache)
{
while (cache->ptr) {
cache->ptr--;
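
The loop body is truncated in this view. What the flush does is drain
the cache, dropping the hypervisor's DMA refcount on every physical
range that was unmapped; a sketch of the whole function, with
__pkvm_host_unuse_dma() as the assumed refcounting helper:

    void kvm_iommu_flush_unmap_cache(struct kvm_iommu_paddr_cache *cache)
    {
            while (cache->ptr) {
                    cache->ptr--;
                    /* Return the range to the host (helper name assumed). */
                    WARN_ON(__pkvm_host_unuse_dma(cache->paddr[cache->ptr],
                                                  cache->pgsize[cache->ptr]));
            }
    }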


@@ -152,6 +152,7 @@ const struct pkvm_module_ops module_ops = {
.iommu_reclaim_pages_atomic = kvm_iommu_reclaim_pages_atomic,
.iommu_snapshot_host_stage2 = kvm_iommu_snapshot_host_stage2,
.hyp_smp_processor_id = _hyp_smp_processor_id,
+.iommu_flush_unmap_cache = kvm_iommu_flush_unmap_cache,
};
int __pkvm_init_module(void *module_init)


@@ -41,6 +41,7 @@ extern const struct pkvm_module_ops *mod_ops;
#define kvm_iommu_donate_pages_atomic(x) CALL_FROM_OPS(iommu_donate_pages_atomic, x)
#define kvm_iommu_reclaim_pages_atomic(x, y) CALL_FROM_OPS(iommu_reclaim_pages_atomic, x, y)
#define kvm_iommu_snapshot_host_stage2(x) CALL_FROM_OPS(iommu_snapshot_host_stage2, x)
+#define kvm_iommu_flush_unmap_cache(x) CALL_FROM_OPS(iommu_flush_unmap_cache, x)
#endif
#endif /* __ARM_SMMU_V3_MODULE__ */
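
When built as the SMMUv3 module, the driver cannot reference the
hypervisor symbol directly, so the macro dispatches through the mod_ops
table declared above; presumably along these lines (an assumption, not
shown in this diff):

    /* Sketch: route the call through the pkvm_module_ops vtable. */
    #define CALL_FROM_OPS(fn, ...)  (mod_ops->fn(__VA_ARGS__))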


@@ -1300,14 +1300,20 @@ static void kvm_iommu_unmap_walker(struct io_pgtable_ctxt *ctxt)
struct kvm_iommu_walk_data *data = (struct kvm_iommu_walk_data *)ctxt->arg;
struct kvm_iommu_paddr_cache *cache = data->cache;
-cache->paddr[cache->ptr] = ctxt->addr;
-cache->pgsize[cache->ptr++] = ctxt->size;
-/*
- * It is guaranteed unmap is called with max of the cache size,
- * see kvm_iommu_unmap_pages()
- */
-WARN_ON(cache->ptr == KVM_IOMMU_PADDR_CACHE_MAX);
+cache->paddr[cache->ptr] = ctxt->addr;
+cache->pgsize[cache->ptr++] = ctxt->size;
+/* Make more space. */
+if (cache->ptr == KVM_IOMMU_PADDR_CACHE_MAX) {
+	/* Must invalidate TLB first. */
+	smmu_iotlb_sync(data->cookie, data->iotlb_gather);
+	iommu_iotlb_gather_init(data->iotlb_gather);
+	kvm_iommu_flush_unmap_cache(cache);
+}
}
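
The ordering in the new branch is load-bearing: the IOTLB must be
invalidated before the cached ranges are released, otherwise the device
could still translate to, and DMA into, pages the host has already been
allowed to reuse. Schematically, using only names from this hunk:

    if (cache->ptr == KVM_IOMMU_PADDR_CACHE_MAX) {
            /* 1) Device can no longer reach the unmapped pages. */
            smmu_iotlb_sync(data->cookie, data->iotlb_gather);
            /* 2) Start a fresh gather for the rest of the walk. */
            iommu_iotlb_gather_init(data->iotlb_gather);
            /* 3) Only now drop refcounts so the host may reuse the pages. */
            kvm_iommu_flush_unmap_cache(cache);
    }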
static size_t smmu_unmap_pages(struct kvm_hyp_iommu_domain *domain, unsigned long iova,