From: Vasant Hegde
Date: Mon, 28 Oct 2024 09:38:05 +0000 (+0000)
Subject: iommu/amd: Separate page table setup from domain allocation
X-Git-Tag: v6.13-rc1~105^2~1^7~8
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b3c989083dabab472eb766d59b1d1fb9f11495d6;p=thirdparty%2Fkernel%2Flinux.git

iommu/amd: Separate page table setup from domain allocation

Currently protection_domain_alloc() allocates the domain and also sets
up its page table. Page table setup is required for PAGING domains
only; domain types like SVA do not need a page table. Hence move the
page table setup code into a separate function.

Also, the SVA domain allocation path does not call
pdom_setup_pgtable(); hence remove the IOMMU_DOMAIN_SVA type check.

Signed-off-by: Vasant Hegde
Reviewed-by: Jacob Pan
Reviewed-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/20241028093810.5901-8-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel
---

diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 8364cd6fa47d0..6285fd1afd50b 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2265,28 +2265,36 @@ void protection_domain_free(struct protection_domain *domain)
 
 struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 {
-	struct io_pgtable_ops *pgtbl_ops;
 	struct protection_domain *domain;
-	int pgtable;
 
 	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
 	if (!domain)
 		return NULL;
 
 	domain->id = domain_id_alloc();
-	if (!domain->id)
-		goto err_free;
+	if (!domain->id) {
+		kfree(domain);
+		return NULL;
+	}
 
 	spin_lock_init(&domain->lock);
 	INIT_LIST_HEAD(&domain->dev_list);
 	INIT_LIST_HEAD(&domain->dev_data_list);
 	domain->iop.pgtbl.cfg.amd.nid = nid;
 
+	return domain;
+}
+
+static int pdom_setup_pgtable(struct protection_domain *domain,
+			      unsigned int type)
+{
+	struct io_pgtable_ops *pgtbl_ops;
+	int pgtable;
+
 	switch (type) {
 	/* No need to allocate io pgtable ops in passthrough mode */
 	case IOMMU_DOMAIN_IDENTITY:
-	case IOMMU_DOMAIN_SVA:
-		return domain;
+		return 0;
 	case IOMMU_DOMAIN_DMA:
 		pgtable = amd_iommu_pgtable;
 		break;
@@ -2298,7 +2306,7 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 		pgtable = AMD_IOMMU_V1;
 		break;
 	default:
-		goto err_id;
+		return -EINVAL;
 	}
 
 	switch (pgtable) {
@@ -2309,20 +2317,14 @@ struct protection_domain *protection_domain_alloc(unsigned int type, int nid)
 		domain->pd_mode = PD_MODE_V2;
 		break;
 	default:
-		goto err_id;
+		return -EINVAL;
 	}
 
-	pgtbl_ops =
-		alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
+	pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
 	if (!pgtbl_ops)
-		goto err_id;
+		return -ENOMEM;
 
-	return domain;
-err_id:
-	domain_id_free(domain->id);
-err_free:
-	kfree(domain);
-	return NULL;
+	return 0;
 }
 
 static inline u64 dma_max_address(void)
@@ -2345,6 +2347,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 	struct protection_domain *domain;
 	struct amd_iommu *iommu = NULL;
+	int ret;
 
 	if (dev)
 		iommu = get_amd_iommu_from_dev(dev);
@@ -2364,6 +2367,13 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
+	ret = pdom_setup_pgtable(domain, type);
+	if (ret) {
+		domain_id_free(domain->id);
+		kfree(domain);
+		return ERR_PTR(ret);
+	}
+
 	domain->domain.geometry.aperture_start = 0;
 	domain->domain.geometry.aperture_end   = dma_max_address();
 	domain->domain.geometry.force_aperture = true;