git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
drm/etnaviv: replace MMU flush marker with flush sequence
author Lucas Stach <l.stach@pengutronix.de>
Fri, 5 Jul 2019 17:17:23 +0000 (19:17 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Apr 2020 08:34:20 +0000 (10:34 +0200)
commit 4900dda90af2cb13bc1d4c12ce94b98acc8fe64e upstream.

If an MMU is shared between multiple GPUs, all of them need to flush their
TLBs, so a single marker that gets reset on the first flush won't do.
Replace the flush marker with a sequence number, so that it's possible to
check if the TLB is in sync with the current page table state for each GPU.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
Signed-off-by: Robert Beckett <bob.beckett@collabora.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/gpu/drm/etnaviv/etnaviv_buffer.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.h
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.h

index ed9588f36bc9b4a6214eca1e2954f520a0ce8ef4..5fc1b41cb6c53fd62c7edfbc2be8cb7401720121 100644 (file)
@@ -258,6 +258,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
        unsigned int waitlink_offset = buffer->user_size - 16;
        u32 return_target, return_dwords;
        u32 link_target, link_dwords;
+       unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
+       bool need_flush = gpu->flush_seq != new_flush_seq;
 
        if (drm_debug & DRM_UT_DRIVER)
                etnaviv_buffer_dump(gpu, buffer, 0, 0x50);
@@ -270,14 +272,14 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
         * need to append a mmu flush load state, followed by a new
         * link to this buffer - a total of four additional words.
         */
-       if (gpu->mmu->need_flush || gpu->switch_context) {
+       if (need_flush || gpu->switch_context) {
                u32 target, extra_dwords;
 
                /* link command */
                extra_dwords = 1;
 
                /* flush command */
-               if (gpu->mmu->need_flush) {
+               if (need_flush) {
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
                                extra_dwords += 1;
                        else
@@ -290,7 +292,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
 
                target = etnaviv_buffer_reserve(gpu, buffer, extra_dwords);
 
-               if (gpu->mmu->need_flush) {
+               if (need_flush) {
                        /* Add the MMU flush */
                        if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
                                CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
@@ -310,7 +312,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, unsigned int event,
                                        SYNC_RECIPIENT_PE);
                        }
 
-                       gpu->mmu->need_flush = false;
+                       gpu->flush_seq = new_flush_seq;
                }
 
                if (gpu->switch_context) {
index a1562f89c3d7c4e9f0933872460ed8006783f363..1f8c8e4328e45e4e35f59a45576014e3e1dd77d4 100644 (file)
@@ -1353,7 +1353,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
        gpu->active_fence = submit->fence->seqno;
 
        if (gpu->lastctx != cmdbuf->ctx) {
-               gpu->mmu->need_flush = true;
+               gpu->mmu->flush_seq++;
                gpu->switch_context = true;
                gpu->lastctx = cmdbuf->ctx;
        }
index 689cb8f3680c97bcd62836bfd90885f134862650..62b2877d090bb519c0351c1cd7405a191d133d1d 100644 (file)
@@ -138,6 +138,7 @@ struct etnaviv_gpu {
 
        struct etnaviv_iommu *mmu;
        struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
+       unsigned int flush_seq;
 
        /* Power Control: */
        struct clk *clk_bus;
index f103e787de94379e127947ad1707af1f54913d67..0e23a0542f0a9170c4ed93afbf7dc4fe4648d54f 100644 (file)
@@ -132,7 +132,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
                 */
                if (mmu->last_iova) {
                        mmu->last_iova = 0;
-                       mmu->need_flush = true;
+                       mmu->flush_seq++;
                        continue;
                }
 
@@ -246,7 +246,7 @@ int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
        }
 
        list_add_tail(&mapping->mmu_node, &mmu->mappings);
-       mmu->need_flush = true;
+       mmu->flush_seq++;
        mutex_unlock(&mmu->lock);
 
        return ret;
@@ -264,7 +264,7 @@ void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
                etnaviv_iommu_remove_mapping(mmu, mapping);
 
        list_del(&mapping->mmu_node);
-       mmu->need_flush = true;
+       mmu->flush_seq++;
        mutex_unlock(&mmu->lock);
 }
 
@@ -346,7 +346,7 @@ int etnaviv_iommu_get_suballoc_va(struct etnaviv_gpu *gpu, dma_addr_t paddr,
                        return ret;
                }
                mmu->last_iova = vram_node->start + size;
-               gpu->mmu->need_flush = true;
+               mmu->flush_seq++;
                mutex_unlock(&mmu->lock);
 
                *iova = (u32)vram_node->start;
index 54be289e5981c65a9fc2e5a0c11e49071918a21a..ccb6ad3582b82ba4fd55359e9973a3606dc94501 100644 (file)
@@ -44,7 +44,7 @@ struct etnaviv_iommu {
        struct list_head mappings;
        struct drm_mm mm;
        u32 last_iova;
-       bool need_flush;
+       unsigned int flush_seq;
 };
 
 struct etnaviv_gem_object;