git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
iommu/riscv: Disable SADE
authorJason Gunthorpe <jgg@nvidia.com>
Fri, 27 Feb 2026 15:25:37 +0000 (11:25 -0400)
committerJoerg Roedel <joerg.roedel@amd.com>
Tue, 17 Mar 2026 12:14:23 +0000 (13:14 +0100)
In terms of the iommu subsystem the SADE/GADE feature "3.4. IOMMU updating
of PTE accessed (A) and dirty (D) updates" is called dirty tracking.

There is no reason to enable HW support for this, and the HW cost
associated with it, unless dirty tracking is actually enabled through
iommufd. It should be a dynamic feature linked to user request.

Further, without implementing the read dirty ops the whole thing is
pointless.

Do not set DC.tc.SADE just because the HW has support for dirty tracking.

Tested-by: Vincent Chen <vincent.chen@sifive.com>
Acked-by: Paul Walmsley <pjw@kernel.org> # arch/riscv
Reviewed-by: Tomasz Jeznach <tjeznach@rivosinc.com>
Tested-by: Tomasz Jeznach <tjeznach@rivosinc.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
drivers/iommu/riscv/iommu.c

index fa2ebfd2f912e1567fd3ea635850b4e4d499e2fb..0b290337247044df5bccd80bdb04c9fd044c5ad2 100644 (file)
@@ -810,7 +810,6 @@ struct riscv_iommu_domain {
        struct list_head bonds;
        spinlock_t lock;                /* protect bonds list updates. */
        int pscid;
-       bool amo_enabled;
        int numa_node;
        unsigned int pgd_mode;
        unsigned long *pgd_root;
@@ -1201,8 +1200,6 @@ static int riscv_iommu_map_pages(struct iommu_domain *iommu_domain,
 
        if (!(prot & IOMMU_WRITE))
                pte_prot = _PAGE_BASE | _PAGE_READ;
-       else if (domain->amo_enabled)
-               pte_prot = _PAGE_BASE | _PAGE_READ | _PAGE_WRITE;
        else
                pte_prot = _PAGE_BASE | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY;
 
@@ -1387,7 +1384,6 @@ static struct iommu_domain *riscv_iommu_alloc_paging_domain(struct device *dev)
        INIT_LIST_HEAD_RCU(&domain->bonds);
        spin_lock_init(&domain->lock);
        domain->numa_node = dev_to_node(iommu->dev);
-       domain->amo_enabled = !!(iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD);
        domain->pgd_mode = pgd_mode;
        domain->pgd_root = iommu_alloc_pages_node_sz(domain->numa_node,
                                                     GFP_KERNEL_ACCOUNT, SZ_4K);
@@ -1512,8 +1508,6 @@ static struct iommu_device *riscv_iommu_probe_device(struct device *dev)
         * the device directory. Do not mark the context valid yet.
         */
        tc = 0;
-       if (iommu->caps & RISCV_IOMMU_CAPABILITIES_AMO_HWAD)
-               tc |= RISCV_IOMMU_DC_TC_SADE;
        for (i = 0; i < fwspec->num_ids; i++) {
                dc = riscv_iommu_get_dc(iommu, fwspec->ids[i]);
                if (!dc) {