ANDROID: KVM: arm64: iommu: Use unified allocator interface

Instead of using a shared memcache, which requires GFP_ATOMIC
allocations, use the new IOMMU allocator.

Pages are passed to the hypervisor through the new unified-allocator
HVCs and kept in the hyp buddy allocator. This also lets the IOMMU
code allocate large-order pages, which was not possible before and can
be used to allocate the PGD for the SMMU.

Bug: 277989609
Bug: 278749606
Change-Id: I15a00d452a706211941107828870ca0d264c08de
Signed-off-by: Mostafa Saleh <smostafa@google.com>
Author: Mostafa Saleh
Date:   2023-10-17 08:39:43 +00:00
Commit: be3195fe7b
Parent: 73a7ad55e3

9 changed files with 63 additions and 138 deletions
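
For illustration only, not part of the diff below: a minimal sketch of how a
host-side driver refills the hypervisor IOMMU allocator through the new
interface, following the request-decoding pattern added to the SMMUv3 driver
in this commit. The helper name is hypothetical; the types, IDs and calls
(struct kvm_hyp_req, HYP_ALLOC_MGT_IOMMU_ID, __pkvm_topup_hyp_alloc_mgt(),
__pkvm_topup_hyp_alloc()) are the ones used by this change.

	/*
	 * Hypothetical helper, assuming the declarations added by this
	 * series (kvm_host.h and the hyp request structs) are in scope.
	 * nr_pages pages of size sz_alloc are allocated in the kernel and
	 * donated to the hypervisor, which keeps them in a per-allocator
	 * buddy pool, so orders > 0 are possible (e.g. the SMMU PGD).
	 */
	static int topup_hyp_allocator(const struct kvm_hyp_req *req)
	{
		if (req->type != KVM_HYP_REQ_TYPE_MEM)
			return -EBADE;

		switch (req->mem.dest) {
		case REQ_MEM_DEST_HYP_IOMMU:
			/* New: refill the hyp IOMMU buddy allocator. */
			return __pkvm_topup_hyp_alloc_mgt(HYP_ALLOC_MGT_IOMMU_ID,
							  req->mem.nr_pages,
							  req->mem.sz_alloc);
		case REQ_MEM_DEST_HYP_ALLOC:
			/* Existing path: refill the hyp heap allocator. */
			return __pkvm_topup_hyp_alloc(req->mem.nr_pages);
		default:
			return -EBADE;
		}
	}

Allocator IDs map to the hyp-side registered_allocators[] table added in this
commit, so the same refill/reclaim HVCs serve both the heap and IOMMU pools.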


@@ -1319,17 +1319,18 @@ int kvm_iommu_init_driver(void);
void kvm_iommu_remove_driver(void);
struct kvm_iommu_ops;
struct kvm_hyp_iommu_memcache;
int kvm_iommu_init_hyp(struct kvm_iommu_ops *hyp_ops,
struct kvm_hyp_iommu_memcache *mc,
unsigned long init_arg);
int kvm_iommu_register_driver(struct kvm_iommu_driver *kern_ops);
/* Allocator interface IDs. */
#define HYP_ALLOC_MGT_HEAP_ID 0
#define HYP_ALLOC_MGT_IOMMU_ID 1
unsigned long __pkvm_reclaim_hyp_alloc_mgt(unsigned long nr_pages);
int __pkvm_topup_hyp_alloc_mgt(unsigned long id, unsigned long nr_pages,
unsigned long sz_alloc);
#endif /* __ARM64_KVM_HOST_H__ */


@@ -15,7 +15,7 @@ struct io_pgtable *kvm_arm_io_pgtable_alloc(struct io_pgtable_cfg *cfg,
int kvm_arm_io_pgtable_free(struct io_pgtable *iop);
#endif /* CONFIG_ARM_SMMU_V3_PKVM */
int kvm_iommu_init(struct kvm_iommu_ops *ops, struct kvm_hyp_iommu_memcache *mc,
int kvm_iommu_init(struct kvm_iommu_ops *ops,
unsigned long init_arg);
int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu);
void *kvm_iommu_donate_page(void);
@@ -47,5 +47,6 @@ struct kvm_iommu_ops {
};
extern struct kvm_iommu_ops *kvm_iommu_ops;
extern struct hyp_mgt_allocator_ops kvm_iommu_allocator_ops;
#endif /* __ARM64_KVM_NVHE_IOMMU_H__ */


@@ -7,9 +7,11 @@
#include <nvhe/alloc.h>
#include <nvhe/alloc_mgt.h>
#include <nvhe/iommu.h>
static struct hyp_mgt_allocator_ops *registered_allocators[] = {
[HYP_ALLOC_MGT_HEAP_ID] = &hyp_alloc_ops,
[HYP_ALLOC_MGT_IOMMU_ID] = &kvm_iommu_allocator_ops,
};
#define MAX_ALLOC_ID (ARRAY_SIZE(registered_allocators))


@@ -1547,10 +1547,9 @@ static void handle___pkvm_host_iommu_iova_to_phys(struct kvm_cpu_context *host_c
static void handle___pkvm_iommu_init(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_iommu_ops *, ops, host_ctxt, 1);
DECLARE_REG(struct kvm_hyp_iommu_memcache *, memcache, host_ctxt, 2);
DECLARE_REG(unsigned long, init_arg, host_ctxt, 3);
DECLARE_REG(unsigned long, init_arg, host_ctxt, 2);
cpu_reg(host_ctxt, 1) = kvm_iommu_init(ops, memcache, init_arg);
cpu_reg(host_ctxt, 1) = kvm_iommu_init(ops, init_arg);
}
typedef void (*hcall_t)(struct kvm_cpu_context *);


@@ -7,6 +7,7 @@
#include <asm/kvm_hyp.h>
#include <kvm/iommu.h>
#include <nvhe/alloc_mgt.h>
#include <nvhe/iommu.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
@@ -17,7 +18,7 @@ struct kvm_iommu_paddr_cache {
u64 paddr[KVM_IOMMU_PADDR_CACHE_MAX];
};
static DEFINE_PER_CPU(struct kvm_iommu_paddr_cache, kvm_iommu_unmap_cache);
struct kvm_hyp_iommu_memcache *kvm_hyp_iommu_memcaches;
/*
* This lock protect domain operations, that can't be done using the atomic refcount
* It is used for alloc/free domains, so it shouldn't have a lot of overhead as
@@ -26,17 +27,19 @@ struct kvm_hyp_iommu_memcache *kvm_hyp_iommu_memcaches;
static DEFINE_HYP_SPINLOCK(iommu_domains_lock);
void **kvm_hyp_iommu_domains;
static struct hyp_pool iommu_host_pool;
DECLARE_PER_CPU(struct kvm_hyp_req, host_hyp_reqs);
void *kvm_iommu_donate_page(void)
{
void *p;
int cpu = hyp_smp_processor_id();
struct kvm_hyp_memcache tmp = kvm_hyp_iommu_memcaches[cpu].pages;
unsigned long order;
struct kvm_hyp_req *req = this_cpu_ptr(&host_hyp_reqs);
if (!tmp.nr_pages) {
p = hyp_alloc_pages(&iommu_host_pool, 0);
if (p)
return p;
req->type = KVM_HYP_REQ_TYPE_MEM;
req->mem.dest = REQ_MEM_DEST_HYP_IOMMU;
req->mem.sz_alloc = PAGE_SIZE;
@@ -44,27 +47,32 @@ void *kvm_iommu_donate_page(void)
return NULL;
}
if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(tmp.head), 1))
return NULL;
p = pop_hyp_memcache(&tmp, hyp_phys_to_virt, &order);
if (!p)
return NULL;
kvm_hyp_iommu_memcaches[cpu].pages = tmp;
memset(p, 0, PAGE_SIZE);
return p;
}
void kvm_iommu_reclaim_page(void *p)
{
int cpu = hyp_smp_processor_id();
memset(p, 0, PAGE_SIZE);
push_hyp_memcache(&kvm_hyp_iommu_memcaches[cpu].pages, p, hyp_virt_to_phys, 0);
WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(p), 1));
hyp_put_page(&iommu_host_pool, p);
}
int kvm_iommu_refill(struct kvm_hyp_memcache *host_mc)
{
return refill_hyp_pool(&iommu_host_pool, host_mc);
}
void kvm_iommu_reclaim(struct kvm_hyp_memcache *host_mc, int target)
{
reclaim_hyp_pool(&iommu_host_pool, host_mc, target);
}
int kvm_iommu_reclaimable(void)
{
return hyp_pool_free_pages(&iommu_host_pool);
}
struct hyp_mgt_allocator_ops kvm_iommu_allocator_ops = {
.refill = kvm_iommu_refill,
.reclaim = kvm_iommu_reclaim,
.reclaimable = kvm_iommu_reclaimable,
};
static struct kvm_hyp_iommu_domain *
handle_to_domain(pkvm_handle_t domain_id)
{
@@ -386,10 +394,8 @@ int kvm_iommu_init_device(struct kvm_hyp_iommu *iommu)
return 0;
}
int kvm_iommu_init(struct kvm_iommu_ops *ops, struct kvm_hyp_iommu_memcache *mc,
unsigned long init_arg)
int kvm_iommu_init(struct kvm_iommu_ops *ops, unsigned long init_arg)
{
enum kvm_pgtable_prot prot;
int ret;
if (WARN_ON(!ops->get_iommu_by_id ||
@@ -408,13 +414,8 @@ int kvm_iommu_init(struct kvm_iommu_ops *ops, struct kvm_hyp_iommu_memcache *mc,
if (ret)
return ret;
/* The memcache is shared with the host */
prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_OWNED);
ret = pkvm_create_mappings(mc, mc + NR_CPUS, prot);
if (ret)
return ret;
ret = hyp_pool_init_empty(&iommu_host_pool, 64 /* order = 6*/);
kvm_iommu_ops = ops;
kvm_hyp_iommu_memcaches = mc;
return 0;
return ret;
}


@@ -25,12 +25,11 @@ int kvm_iommu_register_driver(struct kvm_iommu_driver *kern_ops)
EXPORT_SYMBOL(kvm_iommu_register_driver);
int kvm_iommu_init_hyp(struct kvm_iommu_ops *hyp_ops,
struct kvm_hyp_iommu_memcache *mc,
unsigned long init_arg)
{
BUG_ON(!hyp_ops || !mc);
BUG_ON(!hyp_ops);
return kvm_call_hyp_nvhe(__pkvm_iommu_init, hyp_ops, kern_hyp_va(mc), init_arg);
return kvm_call_hyp_nvhe(__pkvm_iommu_init, hyp_ops, init_arg);
}
EXPORT_SYMBOL(kvm_iommu_init_hyp);


@@ -887,24 +887,30 @@ int __pkvm_register_el2_call(unsigned long hfn_hyp_va)
EXPORT_SYMBOL(__pkvm_register_el2_call);
#endif /* CONFIG_MODULES */
int __pkvm_topup_hyp_alloc(unsigned long nr_pages)
int __pkvm_topup_hyp_alloc_mgt(unsigned long id, unsigned long nr_pages, unsigned long sz_alloc)
{
struct kvm_hyp_memcache mc;
int ret;
init_hyp_memcache(&mc);
ret = topup_hyp_memcache(&mc, nr_pages, 0);
ret = topup_hyp_memcache(&mc, nr_pages, get_order(sz_alloc));
if (ret)
return ret;
ret = kvm_call_hyp_nvhe(__pkvm_hyp_alloc_mgt_refill, HYP_ALLOC_MGT_HEAP_ID,
ret = kvm_call_hyp_nvhe(__pkvm_hyp_alloc_mgt_refill, id,
mc.head, mc.nr_pages);
if (ret)
free_hyp_memcache(&mc);
return ret;
}
EXPORT_SYMBOL(__pkvm_topup_hyp_alloc_mgt);
int __pkvm_topup_hyp_alloc(unsigned long nr_pages)
{
return __pkvm_topup_hyp_alloc_mgt(HYP_ALLOC_MGT_HEAP_ID, nr_pages, PAGE_SIZE);
}
EXPORT_SYMBOL(__pkvm_topup_hyp_alloc);
unsigned long __pkvm_reclaim_hyp_alloc_mgt(unsigned long nr_pages)


@@ -43,47 +43,13 @@ struct kvm_arm_smmu_domain {
static size_t kvm_arm_smmu_cur;
static size_t kvm_arm_smmu_count;
static struct hyp_arm_smmu_v3_device *kvm_arm_smmu_array;
static struct kvm_hyp_iommu_memcache *kvm_arm_smmu_memcache;
static DEFINE_IDA(kvm_arm_smmu_domain_ida);
static DEFINE_PER_CPU(local_lock_t, memcache_lock) =
INIT_LOCAL_LOCK(memcache_lock);
extern struct kvm_iommu_ops kvm_nvhe_sym(smmu_ops);
static void *kvm_arm_smmu_alloc_page(void *opaque, unsigned long order)
{
struct arm_smmu_device *smmu = opaque;
struct page *p;
/* No __GFP_ZERO because KVM zeroes the page */
p = alloc_pages_node(dev_to_node(smmu->dev), GFP_ATOMIC, order);
if (!p)
return NULL;
return page_address(p);
}
static void kvm_arm_smmu_free_page(void *va, void *opaque, unsigned long order)
{
free_pages((unsigned long)va, order);
}
static phys_addr_t kvm_arm_smmu_host_pa(void *va)
{
return __pa(va);
}
static void *kvm_arm_smmu_host_va(phys_addr_t pa)
{
return __va(pa);
}
static int kvm_arm_smmu_topup_memcache(struct arm_smmu_device *smmu,
struct arm_smccc_res *res)
{
struct kvm_hyp_memcache *mc;
int cpu = raw_smp_processor_id();
struct kvm_hyp_req req;
hyp_reqs_smccc_decode(res, &req);
@@ -98,12 +64,9 @@ static int kvm_arm_smmu_topup_memcache(struct arm_smmu_device *smmu,
return -EBADE;
}
lockdep_assert_held(this_cpu_ptr(&memcache_lock));
mc = &kvm_arm_smmu_memcache[cpu].pages;
if (req.mem.dest == REQ_MEM_DEST_HYP_IOMMU) {
return __topup_hyp_memcache(mc, req.mem.nr_pages, kvm_arm_smmu_alloc_page,
kvm_arm_smmu_host_pa, smmu, 0);
return __pkvm_topup_hyp_alloc_mgt(HYP_ALLOC_MGT_IOMMU_ID,
req.mem.nr_pages, req.mem.sz_alloc);
} else if (req.mem.dest == REQ_MEM_DEST_HYP_ALLOC) {
/* Fill hyp alloc*/
return __pkvm_topup_hyp_alloc(req.mem.nr_pages);
@@ -113,21 +76,8 @@ static int kvm_arm_smmu_topup_memcache(struct arm_smmu_device *smmu,
return -EBADE;
}
static void kvm_arm_smmu_reclaim_memcache(void)
{
struct kvm_hyp_memcache *mc;
int cpu = raw_smp_processor_id();
lockdep_assert_held(this_cpu_ptr(&memcache_lock));
mc = &kvm_arm_smmu_memcache[cpu].pages;
__free_hyp_memcache(mc, kvm_arm_smmu_free_page,
kvm_arm_smmu_host_va, NULL);
}
/*
* Issue hypercall, and retry after filling the memcache if necessary.
* After the call, reclaim pages pushed in the memcache by the hypervisor.
*/
#define kvm_call_hyp_nvhe_mc(smmu, ...) \
({ \
@@ -135,7 +85,6 @@ static void kvm_arm_smmu_reclaim_memcache(void)
do { \
__res = kvm_call_hyp_nvhe_smccc(__VA_ARGS__); \
} while (__res.a1 && !kvm_arm_smmu_topup_memcache(smmu, &__res));\
kvm_arm_smmu_reclaim_memcache(); \
__res.a1; \
})
@@ -246,10 +195,8 @@ static int kvm_arm_smmu_domain_finalize(struct kvm_arm_smmu_domain *kvm_smmu_dom
pgd = (unsigned long)page_to_virt(p);
local_lock_irq(&memcache_lock);
ret = kvm_call_hyp_nvhe_mc(smmu, __pkvm_host_iommu_alloc_domain,
kvm_smmu_domain->id, pgd);
local_unlock_irq(&memcache_lock);
if (ret)
goto err_free_pgd;
@@ -342,7 +289,6 @@ static int kvm_arm_smmu_attach_dev(struct iommu_domain *domain,
if (ret)
return ret;
local_lock_irq(&memcache_lock);
for (i = 0; i < fwspec->num_ids; i++) {
int sid = fwspec->ids[i];
@@ -352,15 +298,14 @@ static int kvm_arm_smmu_attach_dev(struct iommu_domain *domain,
if (ret) {
dev_err(smmu->dev, "cannot attach device %s (0x%x): %d\n",
dev_name(dev), sid, ret);
goto out_unlock;
goto out_ret;
}
}
master->domain = kvm_smmu_domain;
out_unlock:
out_ret:
if (ret)
kvm_arm_smmu_detach_dev(host_smmu, master);
local_unlock_irq(&memcache_lock);
return ret;
}
@@ -370,13 +315,11 @@ static int kvm_arm_smmu_map_pages(struct iommu_domain *domain,
gfp_t gfp, size_t *total_mapped)
{
size_t mapped;
unsigned long irqflags;
size_t size = pgsize * pgcount;
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
struct arm_smmu_device *smmu = kvm_smmu_domain->smmu;
struct arm_smccc_res res;
local_lock_irqsave(&memcache_lock, irqflags);
do {
res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_map_pages,
kvm_smmu_domain->id,
@@ -389,8 +332,6 @@ static int kvm_arm_smmu_map_pages(struct iommu_domain *domain,
pgcount -= mapped / pgsize;
*total_mapped += mapped;
} while (*total_mapped < size && !kvm_arm_smmu_topup_memcache(smmu, &res));
kvm_arm_smmu_reclaim_memcache();
local_unlock_irqrestore(&memcache_lock, irqflags);
if (*total_mapped < size)
return -EINVAL;
@@ -403,14 +344,12 @@ static size_t kvm_arm_smmu_unmap_pages(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather)
{
size_t unmapped;
unsigned long irqflags;
size_t total_unmapped = 0;
size_t size = pgsize * pgcount;
struct kvm_arm_smmu_domain *kvm_smmu_domain = to_kvm_smmu_domain(domain);
struct arm_smmu_device *smmu = kvm_smmu_domain->smmu;
struct arm_smccc_res res;
local_lock_irqsave(&memcache_lock, irqflags);
do {
res = kvm_call_hyp_nvhe_smccc(__pkvm_host_iommu_unmap_pages,
kvm_smmu_domain->id,
@@ -429,8 +368,6 @@ static size_t kvm_arm_smmu_unmap_pages(struct iommu_domain *domain,
*/
} while (total_unmapped < size &&
(unmapped || !kvm_arm_smmu_topup_memcache(smmu, &res)));
kvm_arm_smmu_reclaim_memcache();
local_unlock_irqrestore(&memcache_lock, irqflags);
return total_unmapped;
}
@@ -689,7 +626,7 @@ static struct platform_driver kvm_arm_smmu_driver = {
static int kvm_arm_smmu_array_alloc(void)
{
int smmu_order, mc_order;
int smmu_order;
struct device_node *np;
kvm_arm_smmu_count = 0;
@@ -706,17 +643,7 @@ static int kvm_arm_smmu_array_alloc(void)
if (!kvm_arm_smmu_array)
return -ENOMEM;
mc_order = get_order(NR_CPUS * sizeof(*kvm_arm_smmu_memcache));
kvm_arm_smmu_memcache = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
mc_order);
if (!kvm_arm_smmu_memcache)
goto err_free_array;
return 0;
err_free_array:
free_pages((unsigned long)kvm_arm_smmu_array, smmu_order);
return -ENOMEM;
}
static void kvm_arm_smmu_array_free(void)
@@ -725,8 +652,6 @@ static void kvm_arm_smmu_array_free(void)
order = get_order(kvm_arm_smmu_count * sizeof(*kvm_arm_smmu_array));
free_pages((unsigned long)kvm_arm_smmu_array, order);
order = get_order(NR_CPUS * sizeof(*kvm_arm_smmu_memcache));
free_pages((unsigned long)kvm_arm_smmu_memcache, order);
}
/**
@@ -765,8 +690,7 @@ static int kvm_arm_smmu_v3_init(void)
kvm_hyp_arm_smmu_v3_smmus = kvm_arm_smmu_array;
kvm_hyp_arm_smmu_v3_count = kvm_arm_smmu_count;
return kvm_iommu_init_hyp(kern_hyp_va(lm_alias(&kvm_nvhe_sym(smmu_ops))),
kvm_arm_smmu_memcache, 0);
return kvm_iommu_init_hyp(kern_hyp_va(lm_alias(&kvm_nvhe_sym(smmu_ops))), 0);
err_free:
kvm_arm_smmu_array_free();


@@ -24,14 +24,6 @@ struct kvm_hyp_iommu {
#endif
};
struct kvm_hyp_iommu_memcache {
struct kvm_hyp_memcache pages;
bool needs_page;
} ____cacheline_aligned_in_smp;
extern struct kvm_hyp_iommu_memcache *kvm_nvhe_sym(kvm_hyp_iommu_memcaches);
#define kvm_hyp_iommu_memcaches kvm_nvhe_sym(kvm_hyp_iommu_memcaches)
extern void **kvm_nvhe_sym(kvm_hyp_iommu_domains);
#define kvm_hyp_iommu_domains kvm_nvhe_sym(kvm_hyp_iommu_domains)