ANDROID: drivers/arm-smmu-v3-kvm: Respect kernel allocation flags

The IOMMU map operation can be called from atomic context, which
requires atomic allocation; for that reason the map_pages op takes a
gfp argument, which the driver currently ignores.

Use the new __pkvm_topup_hyp_alloc_mgt_gfp() function and pass the
caller's allocation flags through for map; otherwise keep the old
behaviour.

Bug: 367702966
Bug: 277989609
Bug: 278749606

Change-Id: I6e8cd24ec4275a619f7db48efaf78f3cd4925ce4
Signed-off-by: Mostafa Saleh <smostafa@google.com>

@@ -72,7 +72,7 @@ extern struct kvm_iommu_ops kvm_nvhe_sym(smmu_ops);
 static int atomic_pages;
 module_param(atomic_pages, int, 0);
 
-static int kvm_arm_smmu_topup_memcache(struct arm_smccc_res *res)
+static int kvm_arm_smmu_topup_memcache(struct arm_smccc_res *res, gfp_t gfp)
 {
 	struct kvm_hyp_req req;
 
@@ -89,8 +89,10 @@ static int kvm_arm_smmu_topup_memcache(struct arm_smccc_res *res)
 	}
 	if (req.mem.dest == REQ_MEM_DEST_HYP_IOMMU) {
-		return __pkvm_topup_hyp_alloc_mgt(HYP_ALLOC_MGT_IOMMU_ID,
-						  req.mem.nr_pages, req.mem.sz_alloc);
+		return __pkvm_topup_hyp_alloc_mgt_gfp(HYP_ALLOC_MGT_IOMMU_ID,
+						      req.mem.nr_pages,
+						      req.mem.sz_alloc,
+						      gfp);
 	} else if (req.mem.dest == REQ_MEM_DEST_HYP_ALLOC) {
 		/* Fill hyp alloc*/
 		return __pkvm_topup_hyp_alloc(req.mem.nr_pages);
 	}
@@ -108,7 +110,7 @@ static int kvm_arm_smmu_topup_memcache(struct arm_smccc_res *res)
 	struct arm_smccc_res __res;					\
 	do {								\
 		__res = kvm_call_hyp_nvhe_smccc(__VA_ARGS__);		\
-	} while (__res.a1 && !kvm_arm_smmu_topup_memcache(&__res));	\
+	} while (__res.a1 && !kvm_arm_smmu_topup_memcache(&__res, GFP_KERNEL));\
 	__res.a1;							\
 })
@@ -395,7 +397,7 @@ static int kvm_arm_smmu_map_pages(struct iommu_domain *domain,
 		WARN_ON(mapped > pgcount * pgsize);
 		pgcount -= mapped / pgsize;
 		*total_mapped += mapped;
-	} while (*total_mapped < size && !kvm_arm_smmu_topup_memcache(&res));
+	} while (*total_mapped < size && !kvm_arm_smmu_topup_memcache(&res, gfp));
 
 	if (*total_mapped < size)
 		return -EINVAL;
@@ -430,7 +432,7 @@ static size_t kvm_arm_smmu_unmap_pages(struct iommu_domain *domain,
 	 * block mapping.
 	 */
 	} while (total_unmapped < size &&
-		 (unmapped || !kvm_arm_smmu_topup_memcache(&res)));
+		 (unmapped || !kvm_arm_smmu_topup_memcache(&res, GFP_ATOMIC)));
 
 	return total_unmapped;
 }
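
For context, all three call sites share the same retry pattern: issue the
hypercall, and if the hypervisor reports that its memcache ran dry, top it
up from the kernel and retry. Below is a minimal sketch of the map path
after this change; the hypercall name, return-register layout, and helper
signature are illustrative assumptions (the real driver routes the request
details through struct kvm_hyp_req, elided here), not the driver's exact
code.

	/*
	 * Hypothetical, simplified sketch of the map-path retry loop.
	 */
	static int kvm_arm_smmu_map_sketch(unsigned long iova, phys_addr_t paddr,
					   size_t size, gfp_t gfp)
	{
		struct arm_smccc_res res;
		size_t mapped = 0;

		do {
			/* Ask the hypervisor to map as much as its memcache allows. */
			res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_map_pages,
						      iova + mapped, paddr + mapped,
						      size - mapped);
			mapped += res.a2;	/* bytes mapped so far (assumed) */
			/*
			 * If the hypervisor ran out of pages, refill its allocator
			 * with the gfp flags the IOMMU core passed to map_pages():
			 * GFP_ATOMIC when called from atomic context, GFP_KERNEL
			 * otherwise. Stop when the topup itself fails.
			 */
		} while (mapped < size && !kvm_arm_smmu_topup_memcache(&res, gfp));

		return mapped < size ? -EINVAL : 0;
	}

The unmap path hard-codes GFP_ATOMIC, presumably because unmap_pages has no
gfp argument and may run in atomic context, while the generic hypercall
wrapper keeps GFP_KERNEL, preserving the old behaviour for non-map callers.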