From: Jason Gunthorpe
Date: Fri, 10 Jan 2025 16:35:03 +0000 (-0400)
Subject: iommu/amd: Change amd_iommu_pgtable to use enum protection_domain_mode
X-Git-Tag: v6.14-rc1~116^2^9~2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=13b4ec749163710e3d188d2fed7405308b1b1e73;p=thirdparty%2Flinux.git

iommu/amd: Change amd_iommu_pgtable to use enum protection_domain_mode

Currently it uses enum io_pgtable_fmt, which comes from the io pagetable
code, so most of its enum values are invalid here. protection_domain_mode
is internal to the driver and has only the two valid values.

Fix some signatures and variables to use the right type as well.

Reviewed-by: Vasant Hegde
Signed-off-by: Jason Gunthorpe
Link: https://lore.kernel.org/r/5-v2-9776c53c2966+1c7-amd_paging_flags_jgg@nvidia.com
Signed-off-by: Joerg Roedel
---

diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index d6f4cf8235992..0fb0c8392b530 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -40,7 +40,7 @@ void amd_iommu_disable(void);
 int amd_iommu_reenable(int mode);
 int amd_iommu_enable_faulting(unsigned int cpu);
 extern int amd_iommu_guest_ir;
-extern enum io_pgtable_fmt amd_iommu_pgtable;
+extern enum protection_domain_mode amd_iommu_pgtable;
 extern int amd_iommu_gpt_level;
 extern unsigned long amd_iommu_pgsize_bitmap;
 
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 9db8fd1275bed..1d0a82ab9c1c6 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -152,7 +152,7 @@ struct ivmd_header {
 bool amd_iommu_dump;
 bool amd_iommu_irq_remap __read_mostly;
 
-enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;
+enum protection_domain_mode amd_iommu_pgtable = PD_MODE_V1;
 
 /* Guest page table level */
 int amd_iommu_gpt_level = PAGE_MODE_4_LEVEL;
@@ -2164,7 +2164,7 @@ static void print_iommu_info(void)
 		if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
 			pr_info("X2APIC enabled\n");
 	}
-	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+	if (amd_iommu_pgtable == PD_MODE_V2) {
 		pr_info("V2 page table enabled (Paging mode : %d level)\n",
 			amd_iommu_gpt_level);
 	}
@@ -3082,10 +3082,10 @@ static int __init early_amd_iommu_init(void)
 	    FIELD_GET(FEATURE_GATS, amd_iommu_efr) == GUEST_PGTABLE_5_LEVEL)
 		amd_iommu_gpt_level = PAGE_MODE_5_LEVEL;
 
-	if (amd_iommu_pgtable == AMD_IOMMU_V2) {
+	if (amd_iommu_pgtable == PD_MODE_V2) {
 		if (!amd_iommu_v2_pgtbl_supported()) {
 			pr_warn("Cannot enable v2 page table for DMA-API. Fallback to v1.\n");
-			amd_iommu_pgtable = AMD_IOMMU_V1;
+			amd_iommu_pgtable = PD_MODE_V1;
 		}
 	}
 
@@ -3208,7 +3208,7 @@ static void iommu_snp_enable(void)
 		goto disable_snp;
 	}
 
-	if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+	if (amd_iommu_pgtable != PD_MODE_V1) {
 		pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
 		goto disable_snp;
 	}
@@ -3485,9 +3485,9 @@ static int __init parse_amd_iommu_options(char *str)
 		} else if (strncmp(str, "force_isolation", 15) == 0) {
 			amd_iommu_force_isolation = true;
 		} else if (strncmp(str, "pgtbl_v1", 8) == 0) {
-			amd_iommu_pgtable = AMD_IOMMU_V1;
+			amd_iommu_pgtable = PD_MODE_V1;
 		} else if (strncmp(str, "pgtbl_v2", 8) == 0) {
-			amd_iommu_pgtable = AMD_IOMMU_V2;
+			amd_iommu_pgtable = PD_MODE_V2;
 		} else if (strncmp(str, "irtcachedis", 11) == 0) {
 			amd_iommu_irtcachedis = true;
 		} else if (strncmp(str, "nohugepages", 11) == 0) {
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 649dfd22904f9..3a12ef96e7ea6 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2476,32 +2476,30 @@ struct protection_domain *protection_domain_alloc(int nid)
 	return domain;
 }
 
-static int pdom_setup_pgtable(struct protection_domain *domain, int pgtable)
+static int pdom_setup_pgtable(struct protection_domain *domain)
 {
 	struct io_pgtable_ops *pgtbl_ops;
+	enum io_pgtable_fmt fmt;
 
-	switch (pgtable) {
-	case AMD_IOMMU_V1:
-		domain->pd_mode = PD_MODE_V1;
+	switch (domain->pd_mode) {
+	case PD_MODE_V1:
+		fmt = AMD_IOMMU_V1;
 		break;
-	case AMD_IOMMU_V2:
-		domain->pd_mode = PD_MODE_V2;
+	case PD_MODE_V2:
+		fmt = AMD_IOMMU_V2;
 		break;
-	default:
-		return -EINVAL;
 	}
 
-	pgtbl_ops =
-		alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl.cfg, domain);
+	pgtbl_ops = alloc_io_pgtable_ops(fmt, &domain->iop.pgtbl.cfg, domain);
 	if (!pgtbl_ops)
 		return -ENOMEM;
 
 	return 0;
 }
 
-static inline u64 dma_max_address(int pgtable)
+static inline u64 dma_max_address(enum protection_domain_mode pgtable)
 {
-	if (pgtable == AMD_IOMMU_V1)
+	if (pgtable == PD_MODE_V1)
 		return ~0ULL;
 
 	/* V2 with 4/5 level page table */
@@ -2513,8 +2511,9 @@ static bool amd_iommu_hd_support(struct amd_iommu *iommu)
 	return iommu && (iommu->features & FEATURE_HDSUP);
 }
 
-static struct iommu_domain *do_iommu_domain_alloc(struct device *dev, u32 flags,
-						  int pgtable)
+static struct iommu_domain *
+do_iommu_domain_alloc(struct device *dev, u32 flags,
+		      enum protection_domain_mode pgtable)
 {
 	bool dirty_tracking = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 	struct amd_iommu *iommu = get_amd_iommu_from_dev(dev);
@@ -2525,7 +2524,8 @@ static struct iommu_domain *do_iommu_domain_alloc(struct device *dev, u32 flags,
 	if (!domain)
 		return ERR_PTR(-ENOMEM);
 
-	ret = pdom_setup_pgtable(domain, pgtable);
+	domain->pd_mode = pgtable;
+	ret = pdom_setup_pgtable(domain);
 	if (ret) {
 		pdom_id_free(domain->id);
 		kfree(domain);
@@ -2563,13 +2563,13 @@ amd_iommu_domain_alloc_paging_flags(struct device *dev, u32 flags,
 		if (!amd_iommu_pasid_supported())
 			return ERR_PTR(-EOPNOTSUPP);
 
-		return do_iommu_domain_alloc(dev, flags, AMD_IOMMU_V2);
+		return do_iommu_domain_alloc(dev, flags, PD_MODE_V2);
 	}
 
 	/* Allocate domain with v1 page table for dirty tracking */
 	if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) {
 		if (amd_iommu_hd_support(iommu))
-			return do_iommu_domain_alloc(dev, flags, AMD_IOMMU_V1);
+			return do_iommu_domain_alloc(dev, flags, PD_MODE_V1);
 
 		return ERR_PTR(-EOPNOTSUPP);
 	}
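
For reference, the two enum types being converted between look roughly like
this. This is an abridged sketch based on include/linux/io-pgtable.h and
drivers/iommu/amd/amd_iommu_types.h around this kernel version; the exact
member lists may differ in other trees, so treat it as illustrative:

/* include/linux/io-pgtable.h (abridged): shared by all io-pgtable users,
 * so only two of its values are meaningful to the AMD driver */
enum io_pgtable_fmt {
	/* ... ARM and Apple formats elided ... */
	AMD_IOMMU_V1,
	AMD_IOMMU_V2,
	/* ... */
	IO_PGTABLE_NUM_FMTS,
};

/* drivers/iommu/amd/amd_iommu_types.h: driver-internal, with exactly the
 * two values that are valid for amd_iommu_pgtable */
enum protection_domain_mode {
	PD_MODE_V1 = 1,
	PD_MODE_V2,
};

After the conversion, enum io_pgtable_fmt appears only in the local fmt
variable of pdom_setup_pgtable(), the one place that still calls into the
io-pgtable layer, which also lets the switch drop its -EINVAL default case.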