iommu/amd: Remove type argument from do_iommu_domain_alloc() and related

do_iommu_domain_alloc() is only called from
amd_iommu_domain_alloc_paging_flags(), so type is always
IOMMU_DOMAIN_UNMANAGED. Remove the type argument and all the dead
conditionals checking it.
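
A minimal sketch of the wiring that makes this true (the op assignment is
assumed from the handler name, it is not part of this diff):

  const struct iommu_ops amd_iommu_ops = {
  	/* Only path that reaches do_iommu_domain_alloc(), so every domain
  	 * it builds is an UNMANAGED paging domain. */
  	.domain_alloc_paging_flags = amd_iommu_domain_alloc_paging_flags,
  	/* ... other ops elided ... */
  };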

The IOMMU_DOMAIN_IDENTITY checks are similarly obsolete, as the conversion
to the global static identity domain removed those call paths.
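
For reference, the static identity domain pattern looks roughly like the
sketch below: one global domain built at init time and used by the core
directly, so IOMMU_DOMAIN_IDENTITY never reaches the allocation path
(simplified; pdom_id_alloc() and the init details are assumptions based on
the existing helpers, not part of this diff):

  static struct protection_domain identity_domain;

  void amd_iommu_init_identity_domain(void)
  {
  	/* Built once; the core picks it up via ops->identity_domain instead
  	 * of asking the driver to allocate an identity domain. */
  	identity_domain.domain.type = IOMMU_DOMAIN_IDENTITY;
  	identity_domain.id = pdom_id_alloc();
  	protection_domain_init(&identity_domain, NUMA_NO_NODE);
  }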

The caller of protection_domain_alloc() is expected to set the type; fix
the one spot in the SVA code that missed doing so.

Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/4-v2-9776c53c2966+1c7-amd_paging_flags_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 55b237dd7f (parent 02bcd1a8b9)
Jason Gunthorpe, 2025-01-10 12:35:02 -04:00; committed by Joerg Roedel
3 changed files with 13 additions and 27 deletions


@@ -46,7 +46,7 @@ extern unsigned long amd_iommu_pgsize_bitmap;
 
 /* Protection domain ops */
 void amd_iommu_init_identity_domain(void);
-struct protection_domain *protection_domain_alloc(unsigned int type, int nid);
+struct protection_domain *protection_domain_alloc(int nid);
 void protection_domain_free(struct protection_domain *domain);
 struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
 						struct mm_struct *mm);


@@ -2455,7 +2455,7 @@ static void protection_domain_init(struct protection_domain *domain, int nid)
 	domain->iop.pgtbl.cfg.amd.nid = nid;
 }
 
-struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
+struct protection_domain *protection_domain_alloc(int nid)
 {
 	struct protection_domain *domain;
 	int domid;
@@ -2476,15 +2476,10 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 	return domain;
 }
 
-static int pdom_setup_pgtable(struct protection_domain *domain,
-			      unsigned int type, int pgtable)
+static int pdom_setup_pgtable(struct protection_domain *domain, int pgtable)
 {
 	struct io_pgtable_ops *pgtbl_ops;
 
-	/* No need to allocate io pgtable ops in passthrough mode */
-	if (!(type & __IOMMU_DOMAIN_PAGING))
-		return 0;
-
 	switch (pgtable) {
 	case AMD_IOMMU_V1:
 		domain->pd_mode = PD_MODE_V1;
@@ -2518,27 +2513,19 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
 	return iommu && (iommu->features & FEATURE_HDSUP);
 }
 
-static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
-						  struct device *dev,
-						  u32 flags, int pgtable)
+static struct iommu_domain *do_iommu_domain_alloc(struct device *dev, u32 flags,
+						  int pgtable)
 {
 	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
 	struct protection_domain *domain;
 	int ret;
 
-	/*
-	 * Since DTE[Mode]=0 is prohibited on SNP-enabled system,
-	 * default to use IOMMU_DOMAIN_DMA[_FQ].
-	 */
-	if (amd_iommu_snp_en && (type == IOMMU_DOMAIN_IDENTITY))
-		return ERR_PTR(-EINVAL);
-
-	domain = protection_domain_alloc(type, dev_to_node(dev));
+	domain = protection_domain_alloc(dev_to_node(dev));
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
-	ret = pdom_setup_pgtable(domain, type, pgtable);
+	ret = pdom_setup_pgtable(domain, pgtable);
 	if (ret) {
 		pdom_id_free(domain->id);
 		kfree(domain);
@@ -2550,7 +2537,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	domain->domain.geometry.force_aperture = true;
 	domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
 
-	domain->domain.type = type;
+	domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
 	domain->domain.ops = iommu->iommu.ops->default_domain_ops;
 
 	if (dirty_tracking)
@@ -2564,7 +2551,6 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
 				    const struct iommu_user_data *user_data)
 
 {
-	unsigned int type = IOMMU_DOMAIN_UNMANAGED;
 	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
 	const u32 supported_flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING |
 				    IOMMU_HWPT_ALLOC_PASID;
@@ -2577,20 +2563,19 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
 		if (!amd_iommu_pasid_supported())
 			return ERR_PTR(-EOPNOTSUPP);
 
-		return do_iommu_domain_alloc(type, dev, flags, AMD_IOMMU_V2);
+		return do_iommu_domain_alloc(dev, flags, AMD_IOMMU_V2);
 	}
 
 	/* Allocate domain with v1 page table for dirty tracking */
 	if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
 		if (amd_iommu_hd_support(iommu))
-			return do_iommu_domain_alloc(type, dev, flags,
-						     AMD_IOMMU_V1);
+			return do_iommu_domain_alloc(dev, flags, AMD_IOMMU_V1);
 
 		return ERR_PTR(-EOPNOTSUPP);
 	}
 
 	/* If nothing specific is required use the kernel commandline default */
-	return do_iommu_domain_alloc(type, dev, 0, amd_iommu_pgtable);
+	return do_iommu_domain_alloc(dev, 0, amd_iommu_pgtable);
 }
 
 void amd_iommu_domain_free(struct iommu_domain *dom)


@@ -185,12 +185,13 @@ struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
 	struct protection_domain *pdom;
 	int ret;
 
-	pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA, dev_to_node(dev));
+	pdom = protection_domain_alloc(dev_to_node(dev));
 	if (!pdom)
 		return ERR_PTR(-ENOMEM);
 
 	pdom->domain.ops = &amd_sva_domain_ops;
 	pdom->mn.ops = &sva_mn;
+	pdom->domain.type = IOMMU_DOMAIN_SVA;
 
 	ret = mmu_notifier_register(&pdom->mn, mm);
 	if (ret) {