lib/crypto: s390/aes: Migrate optimized code into library
author:    Eric Biggers <ebiggers@kernel.org>  Mon, 12 Jan 2026 19:20:13 +0000 (11:20 -0800)
committer: Eric Biggers <ebiggers@kernel.org>  Thu, 15 Jan 2026 22:08:55 +0000 (14:08 -0800)
Implement aes_preparekey_arch(), aes_encrypt_arch(), and
aes_decrypt_arch() using the CPACF AES instructions.

Then, remove the superseded "aes-s390" crypto_cipher.

The result is that both the AES library and crypto_cipher APIs use the
CPACF AES instructions, whereas previously only crypto_cipher did (and
it wasn't enabled by default, which this commit fixes as well).

Note that this preserves the optimization where the AES key is stored in
raw form rather than expanded form.  CPACF just takes the raw key.

Acked-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Holger Dengler <dengler@linux.ibm.com>
Reviewed-by: Holger Dengler <dengler@linux.ibm.com>
Link: https://lore.kernel.org/r/20260112192035.10427-16-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
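
As a rough illustration of the raw-key point above (a sketch only, not part
of the patch; the helper name is made up, but cpacf_km(), CPACF_KM_AES_128,
and the AES_* constants are the real ones used in the diff below, assuming
the usual <asm/cpacf.h> and <crypto/aes.h> kernel context):

	/* CPACF takes the raw key, so "key expansion" reduces to a memcpy. */
	static void sketch_aes128_encrypt_one(const u8 raw_key[AES_KEYSIZE_128],
					      u8 out[AES_BLOCK_SIZE],
					      const u8 in[AES_BLOCK_SIZE])
	{
		u8 param[AES_KEYSIZE_128];	/* CPACF parameter block */

		memcpy(param, raw_key, sizeof(param));
		cpacf_km(CPACF_KM_AES_128, param, out, in, AES_BLOCK_SIZE);
	}

The generic software path, by contrast, must expand the key into round keys
up front (aes_expandkey_generic() in the diff below) before encrypting.
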
arch/s390/crypto/Kconfig
arch/s390/crypto/aes_s390.c
include/crypto/aes.h
lib/crypto/Kconfig
lib/crypto/s390/aes.h [new file with mode: 0644]

diff --git a/arch/s390/crypto/Kconfig b/arch/s390/crypto/Kconfig
index f838ca055f6d78b1f3624e2ae8fb256d2ec9c3b0..79a2d0034258b6bcceb0b6f83f04ba9c8885303d 100644
--- a/arch/s390/crypto/Kconfig
+++ b/arch/s390/crypto/Kconfig
@@ -14,10 +14,8 @@ config CRYPTO_GHASH_S390
 
 config CRYPTO_AES_S390
        tristate "Ciphers: AES, modes: ECB, CBC, CTR, XTS, GCM"
-       select CRYPTO_ALGAPI
        select CRYPTO_SKCIPHER
        help
-         Block cipher: AES cipher algorithms (FIPS 197)
          AEAD cipher: AES with GCM
          Length-preserving ciphers: AES with ECB, CBC, XTS, and CTR modes
 
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index d0a2954356805dfdac98e0a5c2fc10af4a67993a..62edc66d54788eaa3214b6629bf589f441240b4b 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -20,7 +20,6 @@
 #include <crypto/algapi.h>
 #include <crypto/ghash.h>
 #include <crypto/internal/aead.h>
-#include <crypto/internal/cipher.h>
 #include <crypto/internal/skcipher.h>
 #include <crypto/scatterwalk.h>
 #include <linux/err.h>
@@ -45,7 +44,6 @@ struct s390_aes_ctx {
        unsigned long fc;
        union {
                struct crypto_skcipher *skcipher;
-               struct crypto_cipher *cip;
        } fallback;
 };
 
@@ -72,109 +70,6 @@ struct gcm_sg_walk {
        unsigned int nbytes;
 };
 
-static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
-               unsigned int key_len)
-{
-       struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
-       sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
-       sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
-                       CRYPTO_TFM_REQ_MASK);
-
-       return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
-}
-
-static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
-                      unsigned int key_len)
-{
-       struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-       unsigned long fc;
-
-       /* Pick the correct function code based on the key length */
-       fc = (key_len == 16) ? CPACF_KM_AES_128 :
-            (key_len == 24) ? CPACF_KM_AES_192 :
-            (key_len == 32) ? CPACF_KM_AES_256 : 0;
-
-       /* Check if the function code is available */
-       sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
-       if (!sctx->fc)
-               return setkey_fallback_cip(tfm, in_key, key_len);
-
-       sctx->key_len = key_len;
-       memcpy(sctx->key, in_key, key_len);
-       return 0;
-}
-
-static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
-       struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
-       if (unlikely(!sctx->fc)) {
-               crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
-               return;
-       }
-       cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
-}
-
-static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
-       struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
-       if (unlikely(!sctx->fc)) {
-               crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
-               return;
-       }
-       cpacf_km(sctx->fc | CPACF_DECRYPT,
-                &sctx->key, out, in, AES_BLOCK_SIZE);
-}
-
-static int fallback_init_cip(struct crypto_tfm *tfm)
-{
-       const char *name = tfm->__crt_alg->cra_name;
-       struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
-       sctx->fallback.cip = crypto_alloc_cipher(name, 0,
-                                                CRYPTO_ALG_NEED_FALLBACK);
-
-       if (IS_ERR(sctx->fallback.cip)) {
-               pr_err("Allocating AES fallback algorithm %s failed\n",
-                      name);
-               return PTR_ERR(sctx->fallback.cip);
-       }
-
-       return 0;
-}
-
-static void fallback_exit_cip(struct crypto_tfm *tfm)
-{
-       struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
-       crypto_free_cipher(sctx->fallback.cip);
-       sctx->fallback.cip = NULL;
-}
-
-static struct crypto_alg aes_alg = {
-       .cra_name               =       "aes",
-       .cra_driver_name        =       "aes-s390",
-       .cra_priority           =       300,
-       .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER |
-                                       CRYPTO_ALG_NEED_FALLBACK,
-       .cra_blocksize          =       AES_BLOCK_SIZE,
-       .cra_ctxsize            =       sizeof(struct s390_aes_ctx),
-       .cra_module             =       THIS_MODULE,
-       .cra_init               =       fallback_init_cip,
-       .cra_exit               =       fallback_exit_cip,
-       .cra_u                  =       {
-               .cipher = {
-                       .cia_min_keysize        =       AES_MIN_KEY_SIZE,
-                       .cia_max_keysize        =       AES_MAX_KEY_SIZE,
-                       .cia_setkey             =       aes_set_key,
-                       .cia_encrypt            =       crypto_aes_encrypt,
-                       .cia_decrypt            =       crypto_aes_decrypt,
-               }
-       }
-};
-
 static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
                                    unsigned int len)
 {
@@ -1049,7 +944,6 @@ static struct aead_alg gcm_aes_aead = {
        },
 };
 
-static struct crypto_alg *aes_s390_alg;
 static struct skcipher_alg *aes_s390_skcipher_algs[5];
 static int aes_s390_skciphers_num;
 static struct aead_alg *aes_s390_aead_alg;
@@ -1066,8 +960,6 @@ static int aes_s390_register_skcipher(struct skcipher_alg *alg)
 
 static void aes_s390_fini(void)
 {
-       if (aes_s390_alg)
-               crypto_unregister_alg(aes_s390_alg);
        while (aes_s390_skciphers_num--)
                crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
        if (ctrblk)
@@ -1090,10 +982,6 @@ static int __init aes_s390_init(void)
        if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
            cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
-               ret = crypto_register_alg(&aes_alg);
-               if (ret)
-                       goto out_err;
-               aes_s390_alg = &aes_alg;
                ret = aes_s390_register_skcipher(&ecb_aes_alg);
                if (ret)
                        goto out_err;
@@ -1156,4 +1044,3 @@ MODULE_ALIAS_CRYPTO("aes-all");
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS("CRYPTO_INTERNAL");
diff --git a/include/crypto/aes.h b/include/crypto/aes.h
index bff71cfaedeb7cba95f7b03de55b6349b3a09896..19fd99f383fb4336c48ab4946ae06ec64aa21885 100644
--- a/include/crypto/aes.h
+++ b/include/crypto/aes.h
@@ -46,6 +46,9 @@ union aes_enckey_arch {
         * overlap rndkeys) is set to 0 to differentiate the two formats.
         */
        struct p8_aes_key p8;
+#elif defined(CONFIG_S390)
+       /* Used when the CPU supports CPACF AES for this key's length */
+       u8 raw_key[AES_MAX_KEY_SIZE];
 #endif
 #endif /* CONFIG_CRYPTO_LIB_AES_ARCH */
 };
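
The hunk above works because the arch-specific key storage is a union: on
s390 the raw key bytes simply alias the buffer that the generic code fills
with expanded round keys, and the static keys (see the new aes.h below)
decide which interpretation is live. A simplified view (member names taken
from this patch and its series; the exact layout and types are abridged):

	union aes_enckey_arch {
		u32 rndkeys[AES_MAX_KEYLENGTH_U32]; /* generic: expanded round keys */
	#ifdef CONFIG_S390
		u8 raw_key[AES_MAX_KEY_SIZE];       /* CPACF: raw key copy */
	#endif
	};
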
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 2690b5ffc5ca97b6808b5f32f3f05e4261cfb1a7..56a9b4f53b0e8ff7ffd7e616a70f33c7a2e7a249 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -19,6 +19,7 @@ config CRYPTO_LIB_AES_ARCH
        default y if PPC && (SPE || (PPC64 && VSX))
        default y if RISCV && 64BIT && TOOLCHAIN_HAS_VECTOR_CRYPTO && \
                     RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS
+       default y if S390
 
 config CRYPTO_LIB_AESCFB
        tristate
diff --git a/lib/crypto/s390/aes.h b/lib/crypto/s390/aes.h
new file mode 100644
index 0000000..5466f6e
--- /dev/null
+++ b/lib/crypto/s390/aes.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AES optimized using the CP Assist for Cryptographic Functions (CPACF)
+ *
+ * Copyright 2026 Google LLC
+ */
+#include <asm/cpacf.h>
+#include <linux/cpufeature.h>
+
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_aes128);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_aes192);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_aes256);
+
+/*
+ * When the CPU supports CPACF AES for the requested key length, we need only
+ * save a copy of the raw AES key, as that's what the CPACF instructions need.
+ *
+ * When unsupported, fall back to the generic key expansion and en/decryption.
+ */
+static void aes_preparekey_arch(union aes_enckey_arch *k,
+                               union aes_invkey_arch *inv_k,
+                               const u8 *in_key, int key_len, int nrounds)
+{
+       if (key_len == AES_KEYSIZE_128) {
+               if (static_branch_likely(&have_cpacf_aes128)) {
+                       memcpy(k->raw_key, in_key, AES_KEYSIZE_128);
+                       return;
+               }
+       } else if (key_len == AES_KEYSIZE_192) {
+               if (static_branch_likely(&have_cpacf_aes192)) {
+                       memcpy(k->raw_key, in_key, AES_KEYSIZE_192);
+                       return;
+               }
+       } else {
+               if (static_branch_likely(&have_cpacf_aes256)) {
+                       memcpy(k->raw_key, in_key, AES_KEYSIZE_256);
+                       return;
+               }
+       }
+       aes_expandkey_generic(k->rndkeys, inv_k ? inv_k->inv_rndkeys : NULL,
+                             in_key, key_len);
+}
+
+static inline bool aes_crypt_s390(const struct aes_enckey *key,
+                                 u8 out[AES_BLOCK_SIZE],
+                                 const u8 in[AES_BLOCK_SIZE], int decrypt)
+{
+       if (key->len == AES_KEYSIZE_128) {
+               if (static_branch_likely(&have_cpacf_aes128)) {
+                       cpacf_km(CPACF_KM_AES_128 | decrypt,
+                                (void *)key->k.raw_key, out, in,
+                                AES_BLOCK_SIZE);
+                       return true;
+               }
+       } else if (key->len == AES_KEYSIZE_192) {
+               if (static_branch_likely(&have_cpacf_aes192)) {
+                       cpacf_km(CPACF_KM_AES_192 | decrypt,
+                                (void *)key->k.raw_key, out, in,
+                                AES_BLOCK_SIZE);
+                       return true;
+               }
+       } else {
+               if (static_branch_likely(&have_cpacf_aes256)) {
+                       cpacf_km(CPACF_KM_AES_256 | decrypt,
+                                (void *)key->k.raw_key, out, in,
+                                AES_BLOCK_SIZE);
+                       return true;
+               }
+       }
+       return false;
+}
+
+static void aes_encrypt_arch(const struct aes_enckey *key,
+                            u8 out[AES_BLOCK_SIZE],
+                            const u8 in[AES_BLOCK_SIZE])
+{
+       if (likely(aes_crypt_s390(key, out, in, 0)))
+               return;
+       aes_encrypt_generic(key->k.rndkeys, key->nrounds, out, in);
+}
+
+static void aes_decrypt_arch(const struct aes_key *key,
+                            u8 out[AES_BLOCK_SIZE],
+                            const u8 in[AES_BLOCK_SIZE])
+{
+       if (likely(aes_crypt_s390((const struct aes_enckey *)key, out, in,
+                                 CPACF_DECRYPT)))
+               return;
+       aes_decrypt_generic(key->inv_k.inv_rndkeys, key->nrounds, out, in);
+}
+
+#define aes_mod_init_arch aes_mod_init_arch
+static void aes_mod_init_arch(void)
+{
+       if (cpu_have_feature(S390_CPU_FEATURE_MSA)) {
+               cpacf_mask_t km_functions;
+
+               cpacf_query(CPACF_KM, &km_functions);
+               if (cpacf_test_func(&km_functions, CPACF_KM_AES_128))
+                       static_branch_enable(&have_cpacf_aes128);
+               if (cpacf_test_func(&km_functions, CPACF_KM_AES_192))
+                       static_branch_enable(&have_cpacf_aes192);
+               if (cpacf_test_func(&km_functions, CPACF_KM_AES_256))
+                       static_branch_enable(&have_cpacf_aes256);
+       }
+}
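
A closing note on aes_crypt_s390() above: CPACF uses one KM function code
per key size for both directions, with a modifier bit selecting decryption,
which is why a single helper can serve both aes_encrypt_arch() and
aes_decrypt_arch(). Schematically (illustration only, same calls as above):

	cpacf_km(CPACF_KM_AES_256, param, out, in, AES_BLOCK_SIZE);
	cpacf_km(CPACF_KM_AES_256 | CPACF_DECRYPT, param, out, in, AES_BLOCK_SIZE);

This is also why the raw-key path needs no inverse key schedule: only the
generic fallback in aes_decrypt_arch() ever touches inv_rndkeys.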