crypto: engine - remove request batching support
Author:     Ovidiu Panait <ovidiu.panait.oss@gmail.com>
AuthorDate: Fri, 11 Jul 2025 18:29:31 +0000 (21:29 +0300)
Commit:     Herbert Xu <herbert@gondor.apana.org.au>
CommitDate: Fri, 18 Jul 2025 10:52:00 +0000 (20:52 +1000)

Remove request batching support from crypto_engine, as there are no
drivers using this feature and it doesn't really work that well.

Instead of doing batching based on backlog, a more optimal approach
would be for the user to handle the batching (similar to how IPsec
can hook into GSO to get 64K of data each time or how block encryption
can use unit sizes much greater than 4K).
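
The shape of that caller-side batching, using the standard skcipher API, is
sketched below. This fragment is illustrative only and not part of this patch:
the function name, the contiguous 64K buffer and the blocking wait are
assumptions made for brevity.

  #include <linux/crypto.h>
  #include <linux/scatterlist.h>
  #include <linux/sizes.h>
  #include <linux/slab.h>
  #include <crypto/skcipher.h>

  /*
   * Illustrative only: rather than queueing sixteen 4K requests and expecting
   * the engine to batch them, the submitter describes the whole 64K range with
   * one scatterlist and issues a single request, so the hardware is handed one
   * large unit of work.
   */
  static int encrypt_64k(struct crypto_skcipher *tfm, void *buf, u8 *iv)
  {
          DECLARE_CRYPTO_WAIT(wait);
          struct skcipher_request *req;
          struct scatterlist sg;
          int err;

          req = skcipher_request_alloc(tfm, GFP_KERNEL);
          if (!req)
                  return -ENOMEM;

          sg_init_one(&sg, buf, SZ_64K);
          skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                        CRYPTO_TFM_REQ_MAY_SLEEP,
                                        crypto_req_done, &wait);
          skcipher_request_set_crypt(req, &sg, &sg, SZ_64K, iv);

          err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

          skcipher_request_free(req);
          return err;
  }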

Suggested-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: Ovidiu Panait <ovidiu.panait.oss@gmail.com>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/s390/crypto/paes_s390.c
arch/s390/crypto/phmac_s390.c
crypto/crypto_engine.c
drivers/crypto/caam/jr.c
drivers/crypto/virtio/virtio_crypto_core.c
include/crypto/engine.h
include/crypto/internal/engine.h

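The driver-facing change visible in the hunks below is confined to the
crypto_engine_alloc_init_and_set() signature: the cbk_do_batch parameter is
removed, and every in-tree caller simply drops the NULL it was passing. A
minimal sketch of an affected call site (my_dev, my_engine_setup and MY_QLEN
are placeholders, not taken from any in-tree driver):

  #include <crypto/engine.h>

  #define MY_QLEN 128   /* placeholder queue length for illustration */

  static struct crypto_engine *my_engine_setup(struct device *my_dev)
  {
          /*
           * Before this patch the call carried an unused batching callback:
           *
           *   crypto_engine_alloc_init_and_set(my_dev, true, NULL, false, MY_QLEN);
           *
           * After this patch the callback argument is simply dropped:
           */
          return crypto_engine_alloc_init_and_set(my_dev, true /* retry_support */,
                                                  false /* rt */, MY_QLEN);
  }
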
diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c
index 8a340c16acb46a51222d8f8a9008f86b60ec910b..a624a43a2b5400d2eeb63c6f05a3662bb299aca1 100644
@@ -1633,7 +1633,7 @@ static int __init paes_s390_init(void)
        /* with this pseudo devie alloc and start a crypto engine */
        paes_crypto_engine =
                crypto_engine_alloc_init_and_set(paes_dev.this_device,
-                                                true, NULL, false, MAX_QLEN);
+                                                true, false, MAX_QLEN);
        if (!paes_crypto_engine) {
                rc = -ENOMEM;
                goto out_err;
diff --git a/arch/s390/crypto/phmac_s390.c b/arch/s390/crypto/phmac_s390.c
index 90602f72108fcad6349f8a9ef9646590a1f32817..7ecfdc4fba2d0af09bdbdabbc00a103a682d7f14 100644
@@ -1006,7 +1006,7 @@ static int __init s390_phmac_init(void)
        /* with this pseudo device alloc and start a crypto engine */
        phmac_crypto_engine =
                crypto_engine_alloc_init_and_set(phmac_dev.this_device,
-                                                true, NULL, false, MAX_QLEN);
+                                                true, false, MAX_QLEN);
        if (!phmac_crypto_engine) {
                rc = -ENOMEM;
                goto out_err;
diff --git a/crypto/crypto_engine.c b/crypto/crypto_engine.c
index 445d3c113ee105b4d2e065491f40b40f49258cd4..8a2400f240d4ed011c510bb5ebed44a26c89d6ec 100644
@@ -195,17 +195,6 @@ retry:
 out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
 
-       /*
-        * Batch requests is possible only if
-        * hardware can enqueue multiple requests
-        */
-       if (engine->do_batch_requests) {
-               ret = engine->do_batch_requests(engine);
-               if (ret)
-                       dev_err(engine->dev, "failed to do batch requests: %d\n",
-                               ret);
-       }
-
        return;
 }
 
@@ -462,12 +451,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
  * crypto-engine queue.
  * @dev: the device attached with one hardware engine
  * @retry_support: whether hardware has support for retry mechanism
- * @cbk_do_batch: pointer to a callback function to be invoked when executing
- *                a batch of requests.
- *                This has the form:
- *                callback(struct crypto_engine *engine)
- *                where:
- *                engine: the crypto engine structure.
  * @rt: whether this queue is set to run as a realtime task
  * @qlen: maximum size of the crypto-engine queue
  *
@@ -476,7 +459,6 @@ EXPORT_SYMBOL_GPL(crypto_engine_stop);
  */
 struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool retry_support,
-                                                      int (*cbk_do_batch)(struct crypto_engine *engine),
                                                       bool rt, int qlen)
 {
        struct crypto_engine *engine;
@@ -495,11 +477,6 @@ struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
        engine->idling = false;
        engine->retry_support = retry_support;
        engine->priv_data = dev;
-       /*
-        * Batch requests is possible only if
-        * hardware has support for retry mechanism.
-        */
-       engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
 
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));
@@ -534,7 +511,7 @@ EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
  */
 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 {
-       return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
+       return crypto_engine_alloc_init_and_set(dev, false, rt,
                                                CRYPTO_ENGINE_MAX_QLEN);
 }
 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 9fcdb64084accd0b02eeedd23d64e9b2e843222c..0ef00df9730e58520cc35d32042889d96632969b 100644
@@ -629,8 +629,7 @@ static int caam_jr_probe(struct platform_device *pdev)
        }
 
        /* Initialize crypto engine */
-       jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
-                                                         false,
+       jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, false,
                                                          CRYPTO_ENGINE_MAX_QLEN);
        if (!jrpriv->engine) {
                dev_err(jrdev, "Could not init crypto-engine\n");
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index 0d522049f595bca7793d6ff352dc7156f609575f..3d241446099cc9d52c6c43fd513c19b10d513581 100644
@@ -139,7 +139,7 @@ static int virtcrypto_find_vqs(struct virtio_crypto *vi)
                spin_lock_init(&vi->data_vq[i].lock);
                vi->data_vq[i].vq = vqs[i];
                /* Initialize crypto engine */
-               vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, NULL, true,
+               vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, true,
                                                virtqueue_get_vring_size(vqs[i]));
                if (!vi->data_vq[i].engine) {
                        ret = -ENOMEM;
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 545dbefe3e13c6b790174c82590170100a6ae56d..2e60344437dafb0107a2b4d9555a06af70704a61 100644
@@ -76,7 +76,6 @@ int crypto_engine_stop(struct crypto_engine *engine);
 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
 struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
                                                       bool retry_support,
-                                                      int (*cbk_do_batch)(struct crypto_engine *engine),
                                                       bool rt, int qlen);
 void crypto_engine_exit(struct crypto_engine *engine);
 
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index b6a4ea2240fcaaf0de94f3228383059412c01917..8da1a13619c9f667eba29981a0b3d3aa55fb05f7 100644
@@ -37,8 +37,6 @@ struct device;
  * @unprepare_crypt_hardware: there are currently no more requests on the
  * queue so the subsystem notifies the driver that it may relax the
  * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
  * @kworker: kthread worker struct for request pump
  * @pump_requests: work struct for scheduling work to the request pump
  * @priv_data: the engine private data
@@ -60,8 +58,6 @@ struct crypto_engine {
 
        int (*prepare_crypt_hardware)(struct crypto_engine *engine);
        int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
-       int (*do_batch_requests)(struct crypto_engine *engine);
-
 
        struct kthread_worker           *kworker;
        struct kthread_work             pump_requests;