git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
crypto: marvell/cesa - Do not chain submitted requests
authorHerbert Xu <herbert@gondor.apana.org.au>
Thu, 8 May 2025 05:22:16 +0000 (13:22 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 27 Jun 2025 10:04:11 +0000 (11:04 +0100)
commit 0413bcf0fc460a68a2a7a8354aee833293d7d693 upstream.

This driver tries to chain requests together before submitting them
to hardware in order to reduce completion interrupts.

However, it even extends chains that have already been submitted
to hardware.  This is dangerous because there is no way of knowing
whether the hardware has already read the DMA memory in question
or not.

Fix this by splitting the chain list into two.  One for submitted
requests and one for requests that have not yet been submitted.
Only extend the latter.

Reported-by: Klaus Kudielka <klaus.kudielka@gmail.com>
Fixes: 85030c5168f1 ("crypto: marvell - Add support for chaining crypto requests in TDMA mode")
Cc: <stable@vger.kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/crypto/marvell/cesa/cesa.c
drivers/crypto/marvell/cesa/cesa.h
drivers/crypto/marvell/cesa/tdma.c

index 06211858bf2e7f7353235c5c7e34ab46e3081350..967338426959af731982298292e77cdcd9ce1bb4 100644 (file)
@@ -94,7 +94,7 @@ static int mv_cesa_std_process(struct mv_cesa_engine *engine, u32 status)
 
 static int mv_cesa_int_process(struct mv_cesa_engine *engine, u32 status)
 {
-       if (engine->chain.first && engine->chain.last)
+       if (engine->chain_hw.first && engine->chain_hw.last)
                return mv_cesa_tdma_process(engine, status);
 
        return mv_cesa_std_process(engine, status);
index fa56b45620c7962dc45e3efef9792929e1574f38..4051d566359eb07edfcce8e9b6ab99a71674075f 100644 (file)
@@ -439,8 +439,10 @@ struct mv_cesa_dev {
  *                     SRAM
  * @queue:             fifo of the pending crypto requests
  * @load:              engine load counter, useful for load balancing
- * @chain:             list of the current tdma descriptors being processed
- *                     by this engine.
+ * @chain_hw:          list of the current tdma descriptors being processed
+ *                     by the hardware.
+ * @chain_sw:          list of the current tdma descriptors that will be
+ *                     submitted to the hardware.
  * @complete_queue:    fifo of the processed requests by the engine
  *
  * Structure storing CESA engine information.
@@ -459,7 +461,8 @@ struct mv_cesa_engine {
        struct gen_pool *pool;
        struct crypto_queue queue;
        atomic_t load;
-       struct mv_cesa_tdma_chain chain;
+       struct mv_cesa_tdma_chain chain_hw;
+       struct mv_cesa_tdma_chain chain_sw;
        struct list_head complete_queue;
        int irq;
 };
index 5d9c48fb72b2c2832dce8714ec35be60ed662656..9619c9e886aa82c6835960c657ddabc69e3917c3 100644 (file)
@@ -38,6 +38,15 @@ void mv_cesa_dma_step(struct mv_cesa_req *dreq)
 {
        struct mv_cesa_engine *engine = dreq->engine;
 
+       spin_lock_bh(&engine->lock);
+       if (engine->chain_sw.first == dreq->chain.first) {
+               engine->chain_sw.first = NULL;
+               engine->chain_sw.last = NULL;
+       }
+       engine->chain_hw.first = dreq->chain.first;
+       engine->chain_hw.last = dreq->chain.last;
+       spin_unlock_bh(&engine->lock);
+
        writel_relaxed(0, engine->regs + CESA_SA_CFG);
 
        mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
@@ -96,25 +105,27 @@ void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
 void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
                        struct mv_cesa_req *dreq)
 {
-       if (engine->chain.first == NULL && engine->chain.last == NULL) {
-               engine->chain.first = dreq->chain.first;
-               engine->chain.last  = dreq->chain.last;
-       } else {
-               struct mv_cesa_tdma_desc *last;
+       struct mv_cesa_tdma_desc *last = engine->chain_sw.last;
 
-               last = engine->chain.last;
+       /*
+        * Break the DMA chain if the request being queued needs the IV
+        * regs to be set before lauching the request.
+        */
+       if (!last || dreq->chain.first->flags & CESA_TDMA_SET_STATE)
+               engine->chain_sw.first = dreq->chain.first;
+       else {
                last->next = dreq->chain.first;
-               engine->chain.last = dreq->chain.last;
-
-               /*
-                * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
-                * the last element of the current chain, or if the request
-                * being queued needs the IV regs to be set before lauching
-                * the request.
-                */
-               if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
-                   !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
-                       last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+               last->next_dma = cpu_to_le32(dreq->chain.first->cur_dma);
+       }
+       last = dreq->chain.last;
+       engine->chain_sw.last = last;
+       /*
+        * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+        * the last element of the current chain.
+        */
+       if (last->flags & CESA_TDMA_BREAK_CHAIN) {
+               engine->chain_sw.first = NULL;
+               engine->chain_sw.last = NULL;
        }
 }
 
@@ -127,7 +138,7 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
 
        tdma_cur = readl(engine->regs + CESA_TDMA_CUR);
 
-       for (tdma = engine->chain.first; tdma; tdma = next) {
+       for (tdma = engine->chain_hw.first; tdma; tdma = next) {
                spin_lock_bh(&engine->lock);
                next = tdma->next;
                spin_unlock_bh(&engine->lock);
@@ -149,12 +160,12 @@ int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
                                                                 &backlog);
 
                        /* Re-chaining to the next request */
-                       engine->chain.first = tdma->next;
+                       engine->chain_hw.first = tdma->next;
                        tdma->next = NULL;
 
                        /* If this is the last request, clear the chain */
-                       if (engine->chain.first == NULL)
-                               engine->chain.last  = NULL;
+                       if (engine->chain_hw.first == NULL)
+                               engine->chain_hw.last  = NULL;
                        spin_unlock_bh(&engine->lock);
 
                        ctx = crypto_tfm_ctx(req->tfm);