ANDROID: drivers/arm-smmu-v3-kvm: Remove smmu from kvm_arm_smmu_topup_memcache

kvm_arm_smmu_topup_memcache() takes an smmu argument that is only used
for error printing. That makes the function harder to call without an
smmu context, so just remove the argument as it wasn't adding much.

Bug: 277989609
Bug: 278749606
Bug: 346441377

Change-Id: Ia037b6f0975ce026f21fab2d39b8d7460baff46f
Signed-off-by: Mostafa Saleh <smostafa@google.com>

@@ -72,8 +72,7 @@ extern struct kvm_iommu_ops kvm_nvhe_sym(smmu_ops);
 static int atomic_pages;
 module_param(atomic_pages, int, 0);
 
-static int kvm_arm_smmu_topup_memcache(struct arm_smmu_device *smmu,
-				       struct arm_smccc_res *res)
+static int kvm_arm_smmu_topup_memcache(struct arm_smccc_res *res)
 {
 	struct kvm_hyp_req req;
 
@@ -97,19 +96,19 @@ static int kvm_arm_smmu_topup_memcache(struct arm_smmu_device *smmu,
 		return __pkvm_topup_hyp_alloc(req.mem.nr_pages);
 	}
 
-	dev_err(smmu->dev, "Bogus mem request");
+	pr_err("Bogus mem request");
 	return -EBADE;
 }
 
 /*
  * Issue hypercall, and retry after filling the memcache if necessary.
  */
-#define kvm_call_hyp_nvhe_mc(smmu, ...)				\
+#define kvm_call_hyp_nvhe_mc(...)				\
 ({								\
 	struct arm_smccc_res __res;				\
 	do {							\
 		__res = kvm_call_hyp_nvhe_smccc(__VA_ARGS__);	\
-	} while (__res.a1 && !kvm_arm_smmu_topup_memcache(smmu, &__res));\
+	} while (__res.a1 && !kvm_arm_smmu_topup_memcache(&__res));\
 	__res.a1;						\
 })
 
@@ -245,7 +244,7 @@ static int kvm_arm_smmu_domain_finalize(struct kvm_arm_smmu_domain *kvm_smmu_dom
 	}
 	kvm_smmu_domain->domain.geometry.force_aperture = true;
 
-	ret = kvm_call_hyp_nvhe_mc(smmu, __pkvm_host_iommu_alloc_domain,
+	ret = kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_alloc_domain,
 				   kvm_smmu_domain->id, kvm_smmu_domain->type);
 
 	return ret;
@@ -336,7 +335,7 @@ static int kvm_arm_smmu_set_dev_pasid(struct iommu_domain *domain,
 	for (i = 0; i < fwspec->num_ids; i++) {
 		int sid = fwspec->ids[i];
 
-		ret = kvm_call_hyp_nvhe_mc(smmu, __pkvm_host_iommu_attach_dev,
+		ret = kvm_call_hyp_nvhe_mc(__pkvm_host_iommu_attach_dev,
 					   host_smmu->id, kvm_smmu_domain->id,
 					   sid, pasid, master->ssid_bits);
 		if (ret) {
@@ -373,7 +372,6 @@ static int kvm_arm_smmu_map_pages(struct iommu_domain *domain,
 	size_t mapped;
 	size_t size = pgsize * pgcount;
 	struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
-	struct arm_smmu_device *smmu = kvm_smmu_domain->smmu;
 	struct arm_smccc_res res;
 
 	do {
@@ -387,7 +385,7 @@ static int kvm_arm_smmu_map_pages(struct iommu_domain *domain,
 		WARN_ON(mapped > pgcount * pgsize);
 		pgcount -= mapped / pgsize;
 		*total_mapped += mapped;
-	} while (*total_mapped < size && !kvm_arm_smmu_topup_memcache(smmu, &res));
+	} while (*total_mapped < size && !kvm_arm_smmu_topup_memcache(&res));
 
 	if (*total_mapped < size)
 		return -EINVAL;
@@ -403,7 +401,6 @@ static size_t kvm_arm_smmu_unmap_pages(struct iommu_domain *domain,
 	size_t total_unmapped = 0;
 	size_t size = pgsize * pgcount;
 	struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
-	struct arm_smmu_device *smmu = kvm_smmu_domain->smmu;
 	struct arm_smccc_res res;
 
 	do {
@@ -423,7 +420,7 @@ static size_t kvm_arm_smmu_unmap_pages(struct iommu_domain *domain,
 		 * block mapping.
 		 */
 	} while (total_unmapped < size &&
-		 (unmapped || !kvm_arm_smmu_topup_memcache(smmu, &res)));
+		 (unmapped || !kvm_arm_smmu_topup_memcache(&res)));
 
 	return total_unmapped;
 }
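
For context, the sketch below is not part of the patch; it roughly shows what a kvm_call_hyp_nvhe_mc() call site boils down to after this change, using the domain-allocation hypercall from the diff. The helper name example_alloc_domain() is made up for illustration; the point is that the retry loop now needs only the SMCCC result, not an smmu pointer.

/*
 * Illustrative sketch only (not part of the patch): the retry pattern
 * implemented by kvm_call_hyp_nvhe_mc() after removing the smmu argument.
 * example_alloc_domain() is a hypothetical name used for illustration.
 */
static int example_alloc_domain(struct kvm_arm_smmu_domain *kvm_smmu_domain)
{
	struct arm_smccc_res res;

	do {
		res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_alloc_domain,
					      kvm_smmu_domain->id,
					      kvm_smmu_domain->type);
		/*
		 * A non-zero a1 means the hypercall did not complete;
		 * kvm_arm_smmu_topup_memcache() decodes the hypervisor's
		 * request (e.g. more memcache pages) and the call is
		 * retried until the topup itself fails.
		 */
	} while (res.a1 && !kvm_arm_smmu_topup_memcache(&res));

	return res.a1;
}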