crypto: acomp - Add request chaining and virtual addresses
author     Herbert Xu <herbert@gondor.apana.org.au>
           Sun, 9 Mar 2025 02:43:21 +0000 (10:43 +0800)
committer  Herbert Xu <herbert@gondor.apana.org.au>
           Sat, 15 Mar 2025 08:21:23 +0000 (16:21 +0800)
This adds request chaining and virtual address support to the
acomp interface.

It is identical to the ahash interface, except that the new flags
CRYPTO_ACOMP_REQ_SRC_NONDMA and CRYPTO_ACOMP_REQ_DST_NONDMA have been
added to indicate that the virtual addresses are not suitable for
DMA.  This is because
all existing and potential acomp users can provide memory that
is suitable for DMA so there is no need for a fall-back copy
path.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
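
For context (not part of the patch), here is a minimal usage sketch of the
new virtual-address setters from a hypothetical caller.  The helper name
example_compress, the choice of the "deflate" algorithm and the trimmed
error handling are illustrative only; the buffers are assumed to be linear,
kmalloc'ed and therefore DMA-capable:

#include <crypto/acompress.h>
#include <linux/crypto.h>

/* Hypothetical helper: compress one linear, DMA-capable buffer into another. */
static int example_compress(const u8 *src, unsigned int slen,
                            u8 *dst, unsigned int dlen)
{
        struct crypto_acomp *tfm;
        struct acomp_req *req;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_acomp("deflate", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = acomp_request_alloc(tfm);
        if (!req) {
                crypto_free_acomp(tfm);
                return -ENOMEM;
        }

        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);

        /* Pass the buffers directly; no scatterlists are needed. */
        acomp_request_set_src_dma(req, src, slen);
        acomp_request_set_dst_dma(req, dst, dlen);

        err = crypto_wait_req(crypto_acomp_compress(req), &wait);
        /* On success, req->dlen holds the number of bytes produced. */

        acomp_request_free(req);
        crypto_free_acomp(tfm);
        return err;
}

Had the buffers come from vmalloc(), the _nondma setters would be needed
instead; as noted above, the core provides no fall-back copy path for those.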
crypto/acompress.c
include/crypto/acompress.h
include/crypto/internal/acompress.h

diff --git a/crypto/acompress.c b/crypto/acompress.c
index ef36ec31d73db25dbc94aa1b928274dd6faa0e48..45444e99a9dbcd767026ef5b075b9d3c6f51c539 100644
--- a/crypto/acompress.c
+++ b/crypto/acompress.c
@@ -23,6 +23,8 @@ struct crypto_scomp;
 
 static const struct crypto_type crypto_acomp_type;
 
+static void acomp_reqchain_done(void *data, int err);
+
 static inline struct acomp_alg *__crypto_acomp_alg(struct crypto_alg *alg)
 {
        return container_of(alg, struct acomp_alg, calg.base);
@@ -123,6 +125,201 @@ struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
 }
 EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
 
+static bool acomp_request_has_nondma(struct acomp_req *req)
+{
+       struct acomp_req *r2;
+
+       if (acomp_request_isnondma(req))
+               return true;
+
+       list_for_each_entry(r2, &req->base.list, base.list)
+               if (acomp_request_isnondma(r2))
+                       return true;
+
+       return false;
+}
+
+static void acomp_save_req(struct acomp_req *req, crypto_completion_t cplt)
+{
+       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+       struct acomp_req_chain *state = &req->chain;
+
+       if (!acomp_is_async(tfm))
+               return;
+
+       state->compl = req->base.complete;
+       state->data = req->base.data;
+       req->base.complete = cplt;
+       req->base.data = state;
+       state->req0 = req;
+}
+
+static void acomp_restore_req(struct acomp_req_chain *state)
+{
+       struct acomp_req *req = state->req0;
+       struct crypto_acomp *tfm;
+
+       tfm = crypto_acomp_reqtfm(req);
+       if (!acomp_is_async(tfm))
+               return;
+
+       req->base.complete = state->compl;
+       req->base.data = state->data;
+}
+
+static void acomp_reqchain_virt(struct acomp_req_chain *state, int err)
+{
+       struct acomp_req *req = state->cur;
+       unsigned int slen = req->slen;
+       unsigned int dlen = req->dlen;
+
+       req->base.err = err;
+       state = &req->chain;
+
+       if (state->src)
+               acomp_request_set_src_dma(req, state->src, slen);
+       if (state->dst)
+               acomp_request_set_dst_dma(req, state->dst, dlen);
+       state->src = NULL;
+       state->dst = NULL;
+}
+
+static void acomp_virt_to_sg(struct acomp_req *req)
+{
+       struct acomp_req_chain *state = &req->chain;
+
+       if (acomp_request_src_isvirt(req)) {
+               unsigned int slen = req->slen;
+               const u8 *svirt = req->svirt;
+
+               state->src = svirt;
+               sg_init_one(&state->ssg, svirt, slen);
+               acomp_request_set_src_sg(req, &state->ssg, slen);
+       }
+
+       if (acomp_request_dst_isvirt(req)) {
+               unsigned int dlen = req->dlen;
+               u8 *dvirt = req->dvirt;
+
+               state->dst = dvirt;
+               sg_init_one(&state->dsg, dvirt, dlen);
+               acomp_request_set_dst_sg(req, &state->dsg, dlen);
+       }
+}
+
+static int acomp_reqchain_finish(struct acomp_req_chain *state,
+                                int err, u32 mask)
+{
+       struct acomp_req *req0 = state->req0;
+       struct acomp_req *req = state->cur;
+       struct acomp_req *n;
+
+       acomp_reqchain_virt(state, err);
+
+       if (req != req0)
+               list_add_tail(&req->base.list, &req0->base.list);
+
+       list_for_each_entry_safe(req, n, &state->head, base.list) {
+               list_del_init(&req->base.list);
+
+               req->base.flags &= mask;
+               req->base.complete = acomp_reqchain_done;
+               req->base.data = state;
+               state->cur = req;
+
+               acomp_virt_to_sg(req);
+               err = state->op(req);
+
+               if (err == -EINPROGRESS) {
+                       if (!list_empty(&state->head))
+                               err = -EBUSY;
+                       goto out;
+               }
+
+               if (err == -EBUSY)
+                       goto out;
+
+               acomp_reqchain_virt(state, err);
+               list_add_tail(&req->base.list, &req0->base.list);
+       }
+
+       acomp_restore_req(state);
+
+out:
+       return err;
+}
+
+static void acomp_reqchain_done(void *data, int err)
+{
+       struct acomp_req_chain *state = data;
+       crypto_completion_t compl = state->compl;
+
+       data = state->data;
+
+       if (err == -EINPROGRESS) {
+               if (!list_empty(&state->head))
+                       return;
+               goto notify;
+       }
+
+       err = acomp_reqchain_finish(state, err, CRYPTO_TFM_REQ_MAY_BACKLOG);
+       if (err == -EBUSY)
+               return;
+
+notify:
+       compl(data, err);
+}
+
+static int acomp_do_req_chain(struct acomp_req *req,
+                             int (*op)(struct acomp_req *req))
+{
+       struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+       struct acomp_req_chain *state = &req->chain;
+       int err;
+
+       if (crypto_acomp_req_chain(tfm) ||
+           (!acomp_request_chained(req) && !acomp_request_isvirt(req)))
+               return op(req);
+
+       /*
+        * There are no in-kernel users that do this.  If ever
+        * such users come into being then we could add a fall-back
+        * path.
+        */
+       if (acomp_request_has_nondma(req))
+               return -EINVAL;
+
+       if (acomp_is_async(tfm)) {
+               acomp_save_req(req, acomp_reqchain_done);
+               state = req->base.data;
+       }
+
+       state->op = op;
+       state->cur = req;
+       state->src = NULL;
+       INIT_LIST_HEAD(&state->head);
+       list_splice_init(&req->base.list, &state->head);
+
+       acomp_virt_to_sg(req);
+       err = op(req);
+       if (err == -EBUSY || err == -EINPROGRESS)
+               return -EBUSY;
+
+       return acomp_reqchain_finish(state, err, ~0);
+}
+
+int crypto_acomp_compress(struct acomp_req *req)
+{
+       return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->compress);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_compress);
+
+int crypto_acomp_decompress(struct acomp_req *req)
+{
+       return acomp_do_req_chain(req, crypto_acomp_reqtfm(req)->decompress);
+}
+EXPORT_SYMBOL_GPL(crypto_acomp_decompress);
+
 void comp_prepare_alg(struct comp_alg_common *alg)
 {
        struct crypto_alg *base = &alg->base;
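
To illustrate the chaining path implemented above (again, not part of the
patch), a hedged sketch of how a caller might batch two requests through a
single submission.  The helper name example_compress_pair and its parameters
are hypothetical; tfm is an acomp handle allocated as in the earlier sketch,
and all buffers are linear and DMA-capable:

#include <crypto/acompress.h>
#include <linux/crypto.h>

/* Hypothetical helper: compress two independent buffers in one submission. */
static int example_compress_pair(struct crypto_acomp *tfm,
                                 const u8 *s0, unsigned int sl0,
                                 u8 *d0, unsigned int dl0,
                                 const u8 *s1, unsigned int sl1,
                                 u8 *d1, unsigned int dl1)
{
        struct acomp_req *r0, *r1;
        DECLARE_CRYPTO_WAIT(wait);
        int err = -ENOMEM;

        r0 = acomp_request_alloc(tfm);
        r1 = acomp_request_alloc(tfm);
        if (!r0 || !r1)
                goto out;

        /* Only the head request carries the completion callback. */
        acomp_request_set_callback(r0, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        acomp_request_set_src_dma(r0, s0, sl0);
        acomp_request_set_dst_dma(r0, d0, dl0);

        /* Attach the second request behind the head. */
        acomp_request_set_src_dma(r1, s1, sl1);
        acomp_request_set_dst_dma(r1, d1, dl1);
        acomp_request_chain(r1, r0);

        /* One call drives the whole chain; per-request status is written
         * to each request's base.err as it completes. */
        err = crypto_wait_req(crypto_acomp_compress(r0), &wait);

out:
        if (r1)
                acomp_request_free(r1);
        if (r0)
                acomp_request_free(r0);
        return err;
}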
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h
index c4937709ad0ea45fdf06297268047ba39527f3f5..c4d8a29274c65d3106dfd4fdd0f81078f6016377 100644
--- a/include/crypto/acompress.h
+++ b/include/crypto/acompress.h
 #include <linux/compiler_types.h>
 #include <linux/container_of.h>
 #include <linux/crypto.h>
+#include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/spinlock_types.h>
 #include <linux/types.h>
 
 #define CRYPTO_ACOMP_ALLOC_OUTPUT      0x00000001
+
+/* Set this bit if source is virtual address instead of SG list. */
+#define CRYPTO_ACOMP_REQ_SRC_VIRT      0x00000002
+
+/* Set this bit if the virtual address source cannot be used for DMA. */
+#define CRYPTO_ACOMP_REQ_SRC_NONDMA    0x00000004
+
+/* Set this bit if destination is virtual address instead of SG list. */
+#define CRYPTO_ACOMP_REQ_DST_VIRT      0x00000008
+
+/* Set this bit if the virtual address destination cannot be used for DMA. */
+#define CRYPTO_ACOMP_REQ_DST_NONDMA    0x00000010
+
 #define CRYPTO_ACOMP_DST_MAX           131072
 
+struct acomp_req;
+
+struct acomp_req_chain {
+       struct list_head head;
+       struct acomp_req *req0;
+       struct acomp_req *cur;
+       int (*op)(struct acomp_req *req);
+       crypto_completion_t compl;
+       void *data;
+       struct scatterlist ssg;
+       struct scatterlist dsg;
+       const u8 *src;
+       u8 *dst;
+};
+
 /**
  * struct acomp_req - asynchronous (de)compression request
  *
  * @dst:       Destination data
  * @slen:      Size of the input buffer
  * @dlen:      Size of the output buffer and number of bytes produced
+ * @chain:     Private API code data, do not use
  * @__ctx:     Start of private context data
  */
 struct acomp_req {
        struct crypto_async_request base;
-       struct scatterlist *src;
-       struct scatterlist *dst;
+       union {
+               struct scatterlist *src;
+               const u8 *svirt;
+       };
+       union {
+               struct scatterlist *dst;
+               u8 *dvirt;
+       };
        unsigned int slen;
        unsigned int dlen;
+
+       struct acomp_req_chain chain;
+
        void *__ctx[] CRYPTO_MINALIGN_ATTR;
 };
 
@@ -222,10 +261,16 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
                                              crypto_completion_t cmpl,
                                              void *data)
 {
+       u32 keep = CRYPTO_ACOMP_ALLOC_OUTPUT | CRYPTO_ACOMP_REQ_SRC_VIRT |
+                  CRYPTO_ACOMP_REQ_SRC_NONDMA | CRYPTO_ACOMP_REQ_DST_VIRT |
+                  CRYPTO_ACOMP_REQ_DST_NONDMA;
+
        req->base.complete = cmpl;
        req->base.data = data;
-       req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT;
-       req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT;
+       req->base.flags &= keep;
+       req->base.flags |= flgs & ~keep;
+
+       crypto_reqchain_init(&req->base);
 }
 
 /**
@@ -252,11 +297,144 @@ static inline void acomp_request_set_params(struct acomp_req *req,
        req->slen = slen;
        req->dlen = dlen;
 
-       req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
+       req->base.flags &= ~(CRYPTO_ACOMP_ALLOC_OUTPUT |
+                            CRYPTO_ACOMP_REQ_SRC_VIRT |
+                            CRYPTO_ACOMP_REQ_SRC_NONDMA |
+                            CRYPTO_ACOMP_REQ_DST_VIRT |
+                            CRYPTO_ACOMP_REQ_DST_NONDMA);
        if (!req->dst)
                req->base.flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
 }
 
+/**
+ * acomp_request_set_src_sg() -- Sets source scatterlist
+ *
+ * Sets source scatterlist required by an acomp operation.
+ *
+ * @req:       asynchronous compress request
+ * @src:       pointer to input buffer scatterlist
+ * @slen:      size of the input buffer
+ */
+static inline void acomp_request_set_src_sg(struct acomp_req *req,
+                                           struct scatterlist *src,
+                                           unsigned int slen)
+{
+       req->src = src;
+       req->slen = slen;
+
+       req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+       req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+/**
+ * acomp_request_set_src_dma() -- Sets DMA source virtual address
+ *
+ * Sets source virtual address required by an acomp operation.
+ * The address must be usable for DMA.
+ *
+ * @req:       asynchronous compress request
+ * @src:       virtual address pointer to input buffer
+ * @slen:      size of the input buffer
+ */
+static inline void acomp_request_set_src_dma(struct acomp_req *req,
+                                            const u8 *src, unsigned int slen)
+{
+       req->svirt = src;
+       req->slen = slen;
+
+       req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA;
+       req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+/**
+ * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address
+ *
+ * Sets source virtual address required by an acomp operation.
+ * The address can not be used for DMA.
+ *
+ * @req:       asynchronous compress request
+ * @src:       virtual address pointer to input buffer
+ * @slen:      size of the input buffer
+ */
+static inline void acomp_request_set_src_nondma(struct acomp_req *req,
+                                               const u8 *src,
+                                               unsigned int slen)
+{
+       req->svirt = src;
+       req->slen = slen;
+
+       req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA;
+       req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_sg() -- Sets destination scatterlist
+ *
+ * Sets destination scatterlist required by an acomp operation.
+ *
+ * @req:       asynchronous compress request
+ * @dst:       pointer to output buffer scatterlist
+ * @dlen:      size of the output buffer
+ */
+static inline void acomp_request_set_dst_sg(struct acomp_req *req,
+                                           struct scatterlist *dst,
+                                           unsigned int dlen)
+{
+       req->dst = dst;
+       req->dlen = dlen;
+
+       req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+       req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_dma() -- Sets DMA destination virtual address
+ *
+ * Sets destination virtual address required by an acomp operation.
+ * The address must be usable for DMA.
+ *
+ * @req:       asynchronous compress request
+ * @dst:       virtual address pointer to output buffer
+ * @dlen:      size of the output buffer
+ */
+static inline void acomp_request_set_dst_dma(struct acomp_req *req,
+                                            u8 *dst, unsigned int dlen)
+{
+       req->dvirt = dst;
+       req->dlen = dlen;
+
+       req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
+       req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
+       req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+/**
+ * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address
+ *
+ * Sets destination virtual address required by an acomp operation.
+ * The address can not be used for DMA.
+ *
+ * @req:       asynchronous compress request
+ * @dst:       virtual address pointer to output buffer
+ * @dlen:      size of the output buffer
+ */
+static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
+                                               u8 *dst, unsigned int dlen)
+{
+       req->dvirt = dst;
+       req->dlen = dlen;
+
+       req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
+       req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
+       req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+static inline void acomp_request_chain(struct acomp_req *req,
+                                      struct acomp_req *head)
+{
+       crypto_request_chain(&req->base, &head->base);
+}
+
 /**
  * crypto_acomp_compress() -- Invoke asynchronous compress operation
  *
@@ -266,10 +444,7 @@ static inline void acomp_request_set_params(struct acomp_req *req,
  *
  * Return:     zero on success; error code in case of error
  */
-static inline int crypto_acomp_compress(struct acomp_req *req)
-{
-       return crypto_acomp_reqtfm(req)->compress(req);
-}
+int crypto_acomp_compress(struct acomp_req *req);
 
 /**
  * crypto_acomp_decompress() -- Invoke asynchronous decompress operation
@@ -280,9 +455,6 @@ static inline int crypto_acomp_compress(struct acomp_req *req)
  *
  * Return:     zero on success; error code in case of error
  */
-static inline int crypto_acomp_decompress(struct acomp_req *req)
-{
-       return crypto_acomp_reqtfm(req)->decompress(req);
-}
+int crypto_acomp_decompress(struct acomp_req *req);
 
 #endif
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 4a8f7e3beaa1e36e9fd71560945937ae7ab98bb9..957a5ed7c7f1a89b7869044aa976bf1eb3f74b8a 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -94,4 +94,46 @@ void crypto_unregister_acomp(struct acomp_alg *alg);
 int crypto_register_acomps(struct acomp_alg *algs, int count);
 void crypto_unregister_acomps(struct acomp_alg *algs, int count);
 
+static inline bool acomp_request_chained(struct acomp_req *req)
+{
+       return crypto_request_chained(&req->base);
+}
+
+static inline bool acomp_request_src_isvirt(struct acomp_req *req)
+{
+       return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT;
+}
+
+static inline bool acomp_request_dst_isvirt(struct acomp_req *req)
+{
+       return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT;
+}
+
+static inline bool acomp_request_isvirt(struct acomp_req *req)
+{
+       return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT |
+                                 CRYPTO_ACOMP_REQ_DST_VIRT);
+}
+
+static inline bool acomp_request_src_isnondma(struct acomp_req *req)
+{
+       return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA;
+}
+
+static inline bool acomp_request_dst_isnondma(struct acomp_req *req)
+{
+       return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA;
+}
+
+static inline bool acomp_request_isnondma(struct acomp_req *req)
+{
+       return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA |
+                                 CRYPTO_ACOMP_REQ_DST_NONDMA);
+}
+
+static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm)
+{
+       return crypto_tfm_req_chain(&tfm->base);
+}
+
 #endif
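
Finally (not part of the patch), a hedged sketch of how an implementation
that advertises request-chaining support, i.e. one for which
crypto_acomp_req_chain() returns true so the core passes requests through
without converting them, might use the new predicates.  The helper name
sample_get_linear_buffers and its policy are hypothetical:

#include <crypto/internal/acompress.h>
#include <linux/errno.h>

/*
 * Hypothetical helper for an implementation that only handles linear,
 * DMA-capable buffers: accept the request when both sides were supplied
 * via the new _dma setters, and reject everything else.
 */
static int sample_get_linear_buffers(struct acomp_req *req,
                                     const u8 **src, u8 **dst)
{
        if (!acomp_request_src_isvirt(req) || !acomp_request_dst_isvirt(req))
                return -EINVAL;         /* scatterlist path not handled here */

        if (acomp_request_isnondma(req))
                return -EINVAL;         /* the device needs DMA-capable memory */

        *src = req->svirt;
        *dst = req->dvirt;
        return 0;
}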