crypto: skcipher - Use restrict rather than hand-rolling accesses
author Herbert Xu <herbert@gondor.apana.org.au>
Sun, 23 Feb 2025 06:27:51 +0000 (14:27 +0800)
committer Herbert Xu <herbert@gondor.apana.org.au>
Sun, 2 Mar 2025 07:21:47 +0000 (15:21 +0800)
Rather than accessing 'alg' directly in order to avoid the aliasing
issue that leads to unnecessary reloads, use the __restrict keyword
to tell the compiler explicitly that there is no aliasing.
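
To make the effect concrete, here is a minimal stand-alone sketch
(demo_ctx and demo_scale are hypothetical names, not part of this
patch).  Without __restrict, the store through 'out' could modify
*ctx, so the compiler must reload ctx->shift on every iteration;
with __restrict it may keep the value in a register:

	struct demo_ctx {
		unsigned int shift;
	};

	static void demo_scale(unsigned char *__restrict out,
			       const struct demo_ctx *__restrict ctx,
			       unsigned int n)
	{
		unsigned int i;

		/* ctx->shift can be loaded once: __restrict rules out
		 * the possibility that out[] overlaps *ctx. */
		for (i = 0; i < n; i++)
			out[i] <<= ctx->shift;
	}

Note that the const qualifier on 'ctx' alone would not help: const
only restricts writes through this particular pointer, not through
other routes to the same memory.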

This generates equivalent, if not superior, code on x86 with gcc 12.

Note that in skcipher_walk_virt() the alg assignment is moved to
after might_sleep_if(), because that function is a compiler barrier
and would otherwise force a reload.
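
As an illustration of the barrier effect, consider a minimal sketch
of a compiler barrier similar in spirit to the one that can sit
inside might_sleep_if() (demo_barrier and demo_read are hypothetical
names, reusing the demo_ctx struct from above):

	#define demo_barrier() __asm__ __volatile__("" ::: "memory")

	static unsigned int demo_read(struct demo_ctx *ctx)
	{
		unsigned int a = ctx->shift;	/* first load */

		demo_barrier();

		/* The "memory" clobber makes the compiler assume all
		 * memory may have changed, so this read cannot be
		 * merged with the first: ctx->shift is loaded again. */
		return a + ctx->shift;
	}

Performing the load only after the barrier keeps it on the barrier's
far side, so it is done exactly once.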

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
crypto/skcipher.c
include/crypto/internal/skcipher.h

index b0e1f3c12cdef50ab20ed076fac29a25d43d7778..53123d3685d5dbb823ff5fff83970e5d7f986f55 100644 (file)
@@ -293,14 +293,16 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
        return skcipher_walk_next(walk);
 }
 
-int skcipher_walk_virt(struct skcipher_walk *walk,
-                      struct skcipher_request *req, bool atomic)
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+                      struct skcipher_request *__restrict req, bool atomic)
 {
-       const struct skcipher_alg *alg =
-               crypto_skcipher_alg(crypto_skcipher_reqtfm(req));
+       struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+       struct skcipher_alg *alg;
 
        might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
 
+       alg = crypto_skcipher_alg(tfm);
+
        walk->total = req->cryptlen;
        walk->nbytes = 0;
        walk->iv = req->iv;
@@ -316,14 +318,9 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
        scatterwalk_start(&walk->in, req->src);
        scatterwalk_start(&walk->out, req->dst);
 
-       /*
-        * Accessing 'alg' directly generates better code than using the
-        * crypto_skcipher_blocksize() and similar helper functions here, as it
-        * prevents the algorithm pointer from being repeatedly reloaded.
-        */
-       walk->blocksize = alg->base.cra_blocksize;
-       walk->ivsize = alg->co.ivsize;
-       walk->alignmask = alg->base.cra_alignmask;
+       walk->blocksize = crypto_skcipher_blocksize(tfm);
+       walk->ivsize = crypto_skcipher_ivsize(tfm);
+       walk->alignmask = crypto_skcipher_alignmask(tfm);
 
        if (alg->co.base.cra_type != &crypto_skcipher_type)
                walk->stride = alg->co.chunksize;
@@ -334,10 +331,11 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
 }
 EXPORT_SYMBOL_GPL(skcipher_walk_virt);
 
-static int skcipher_walk_aead_common(struct skcipher_walk *walk,
-                                    struct aead_request *req, bool atomic)
+static int skcipher_walk_aead_common(struct skcipher_walk *__restrict walk,
+                                    struct aead_request *__restrict req,
+                                    bool atomic)
 {
-       const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));
+       struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 
        walk->nbytes = 0;
        walk->iv = req->iv;
@@ -353,21 +351,17 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
        scatterwalk_start_at_pos(&walk->in, req->src, req->assoclen);
        scatterwalk_start_at_pos(&walk->out, req->dst, req->assoclen);
 
-       /*
-        * Accessing 'alg' directly generates better code than using the
-        * crypto_aead_blocksize() and similar helper functions here, as it
-        * prevents the algorithm pointer from being repeatedly reloaded.
-        */
-       walk->blocksize = alg->base.cra_blocksize;
-       walk->stride = alg->chunksize;
-       walk->ivsize = alg->ivsize;
-       walk->alignmask = alg->base.cra_alignmask;
+       walk->blocksize = crypto_aead_blocksize(tfm);
+       walk->stride = crypto_aead_chunksize(tfm);
+       walk->ivsize = crypto_aead_ivsize(tfm);
+       walk->alignmask = crypto_aead_alignmask(tfm);
 
        return skcipher_walk_first(walk);
 }
 
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
-                              struct aead_request *req, bool atomic)
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+                              struct aead_request *__restrict req,
+                              bool atomic)
 {
        walk->total = req->cryptlen;
 
@@ -375,8 +369,9 @@ int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
 }
 EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);
 
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
-                              struct aead_request *req, bool atomic)
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+                              struct aead_request *__restrict req,
+                              bool atomic)
 {
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 
index 4f49621d3eb6191a23ac9fb8f2e5ff49abcdf978..d6ae7a86fed23fc284c160e5bb4eba681623ee35 100644 (file)
@@ -197,13 +197,15 @@ int lskcipher_register_instance(struct crypto_template *tmpl,
                                struct lskcipher_instance *inst);
 
 int skcipher_walk_done(struct skcipher_walk *walk, int res);
-int skcipher_walk_virt(struct skcipher_walk *walk,
-                      struct skcipher_request *req,
+int skcipher_walk_virt(struct skcipher_walk *__restrict walk,
+                      struct skcipher_request *__restrict req,
                       bool atomic);
-int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
-                              struct aead_request *req, bool atomic);
-int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
-                              struct aead_request *req, bool atomic);
+int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk,
+                              struct aead_request *__restrict req,
+                              bool atomic);
+int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk,
+                              struct aead_request *__restrict req,
+                              bool atomic);
 
 static inline void skcipher_walk_abort(struct skcipher_walk *walk)
 {
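
For context, a usage sketch of the walk API whose prototypes are
updated above (demo_crypt and the XOR transform are placeholders,
not real kernel code):

	#include <crypto/internal/skcipher.h>

	static int demo_crypt(struct skcipher_request *req)
	{
		struct skcipher_walk walk;
		unsigned int i;
		int err;

		err = skcipher_walk_virt(&walk, req, false);
		while (walk.nbytes) {
			const u8 *src = walk.src.virt.addr;
			u8 *dst = walk.dst.virt.addr;

			for (i = 0; i < walk.nbytes; i++)
				dst[i] = src[i] ^ 0xaa;

			/* second argument: bytes left unprocessed */
			err = skcipher_walk_done(&walk, 0);
		}
		return err;
	}

Since 'walk' is a caller-local object distinct from 'req', the new
__restrict qualifiers only codify what callers like this one already
guarantee, so no caller changes are required.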