#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
-#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
unsigned long fc;
union {
struct crypto_skcipher *skcipher;
- struct crypto_cipher *cip;
} fallback;
};
unsigned int nbytes;
};
-static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len)
-{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
- sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
- sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
- CRYPTO_TFM_REQ_MASK);
-
- return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
-}
-
-static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
- unsigned int key_len)
-{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
- unsigned long fc;
-
- /* Pick the correct function code based on the key length */
- fc = (key_len == 16) ? CPACF_KM_AES_128 :
- (key_len == 24) ? CPACF_KM_AES_192 :
- (key_len == 32) ? CPACF_KM_AES_256 : 0;
-
- /* Check if the function code is available */
- sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
- if (!sctx->fc)
- return setkey_fallback_cip(tfm, in_key, key_len);
-
- sctx->key_len = key_len;
- memcpy(sctx->key, in_key, key_len);
- return 0;
-}
-
-static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
- if (unlikely(!sctx->fc)) {
- crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
- return;
- }
- cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
-}
-
-static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
- if (unlikely(!sctx->fc)) {
- crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
- return;
- }
- cpacf_km(sctx->fc | CPACF_DECRYPT,
- &sctx->key, out, in, AES_BLOCK_SIZE);
-}
-
-static int fallback_init_cip(struct crypto_tfm *tfm)
-{
- const char *name = tfm->__crt_alg->cra_name;
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
- sctx->fallback.cip = crypto_alloc_cipher(name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
-
- if (IS_ERR(sctx->fallback.cip)) {
- pr_err("Allocating AES fallback algorithm %s failed\n",
- name);
- return PTR_ERR(sctx->fallback.cip);
- }
-
- return 0;
-}
-
-static void fallback_exit_cip(struct crypto_tfm *tfm)
-{
- struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
-
- crypto_free_cipher(sctx->fallback.cip);
- sctx->fallback.cip = NULL;
-}
-
-static struct crypto_alg aes_alg = {
- .cra_name = "aes",
- .cra_driver_name = "aes-s390",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER |
- CRYPTO_ALG_NEED_FALLBACK,
- .cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct s390_aes_ctx),
- .cra_module = THIS_MODULE,
- .cra_init = fallback_init_cip,
- .cra_exit = fallback_exit_cip,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = AES_MIN_KEY_SIZE,
- .cia_max_keysize = AES_MAX_KEY_SIZE,
- .cia_setkey = aes_set_key,
- .cia_encrypt = crypto_aes_encrypt,
- .cia_decrypt = crypto_aes_decrypt,
- }
- }
-};
-
static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
unsigned int len)
{
},
};
-static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[5];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;
static void aes_s390_fini(void)
{
- if (aes_s390_alg)
- crypto_unregister_alg(aes_s390_alg);
while (aes_s390_skciphers_num--)
crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
if (ctrblk)
if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
- ret = crypto_register_alg(&aes_alg);
- if (ret)
- goto out_err;
- aes_s390_alg = &aes_alg;
ret = aes_s390_register_skcipher(&ecb_aes_alg);
if (ret)
goto out_err;
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
-MODULE_IMPORT_NS("CRYPTO_INTERNAL");
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * AES optimized using the CP Assist for Cryptographic Functions (CPACF)
+ *
+ * Copyright 2026 Google LLC
+ */
+#include <asm/cpacf.h>
+#include <linux/cpufeature.h>
+
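+/* Set at init time if the CPACF KM instruction supports the given key size */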
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_aes128);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_aes192);
+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_cpacf_aes256);
+
+/*
+ * When the CPU supports CPACF AES for the requested key length, we only need
+ * to save a copy of the raw AES key, since that is what the CPACF
+ * instructions operate on.
+ *
+ * Otherwise, fall back to the generic key expansion and en/decryption.
+ */
+static void aes_preparekey_arch(union aes_enckey_arch *k,
+ union aes_invkey_arch *inv_k,
+ const u8 *in_key, int key_len, int nrounds)
+{
+ if (key_len == AES_KEYSIZE_128) {
+ if (static_branch_likely(&have_cpacf_aes128)) {
+ memcpy(k->raw_key, in_key, AES_KEYSIZE_128);
+ return;
+ }
+ } else if (key_len == AES_KEYSIZE_192) {
+ if (static_branch_likely(&have_cpacf_aes192)) {
+ memcpy(k->raw_key, in_key, AES_KEYSIZE_192);
+ return;
+ }
+ } else {
+ if (static_branch_likely(&have_cpacf_aes256)) {
+ memcpy(k->raw_key, in_key, AES_KEYSIZE_256);
+ return;
+ }
+ }
+ aes_expandkey_generic(k->rndkeys, inv_k ? inv_k->inv_rndkeys : NULL,
+ in_key, key_len);
+}
+
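+/*
+ * Process one AES block with the CPACF KM instruction, using the function
+ * code that matches the key length.  'decrypt' is either 0 or CPACF_DECRYPT.
+ * Returns false if the required function code is unavailable, in which case
+ * the caller falls back to the generic implementation.
+ */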
+static inline bool aes_crypt_s390(const struct aes_enckey *key,
+ u8 out[AES_BLOCK_SIZE],
+ const u8 in[AES_BLOCK_SIZE], int decrypt)
+{
+ if (key->len == AES_KEYSIZE_128) {
+ if (static_branch_likely(&have_cpacf_aes128)) {
+ cpacf_km(CPACF_KM_AES_128 | decrypt,
+ (void *)key->k.raw_key, out, in,
+ AES_BLOCK_SIZE);
+ return true;
+ }
+ } else if (key->len == AES_KEYSIZE_192) {
+ if (static_branch_likely(&have_cpacf_aes192)) {
+ cpacf_km(CPACF_KM_AES_192 | decrypt,
+ (void *)key->k.raw_key, out, in,
+ AES_BLOCK_SIZE);
+ return true;
+ }
+ } else {
+ if (static_branch_likely(&have_cpacf_aes256)) {
+ cpacf_km(CPACF_KM_AES_256 | decrypt,
+ (void *)key->k.raw_key, out, in,
+ AES_BLOCK_SIZE);
+ return true;
+ }
+ }
+ return false;
+}
+
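+/* Encrypt one block, preferring CPACF over the generic implementation. */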
+static void aes_encrypt_arch(const struct aes_enckey *key,
+ u8 out[AES_BLOCK_SIZE],
+ const u8 in[AES_BLOCK_SIZE])
+{
+ if (likely(aes_crypt_s390(key, out, in, 0)))
+ return;
+ aes_encrypt_generic(key->k.rndkeys, key->nrounds, out, in);
+}
+
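+/* Decrypt one block, preferring CPACF over the generic implementation. */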
+static void aes_decrypt_arch(const struct aes_key *key,
+ u8 out[AES_BLOCK_SIZE],
+ const u8 in[AES_BLOCK_SIZE])
+{
+ if (likely(aes_crypt_s390((const struct aes_enckey *)key, out, in,
+ CPACF_DECRYPT)))
+ return;
+ aes_decrypt_generic(key->inv_k.inv_rndkeys, key->nrounds, out, in);
+}
+
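+/* Query the CPACF KM facility once at init and enable the matching static keys. */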
+#define aes_mod_init_arch aes_mod_init_arch
+static void aes_mod_init_arch(void)
+{
+ if (cpu_have_feature(S390_CPU_FEATURE_MSA)) {
+ cpacf_mask_t km_functions;
+
+ cpacf_query(CPACF_KM, &km_functions);
+ if (cpacf_test_func(&km_functions, CPACF_KM_AES_128))
+ static_branch_enable(&have_cpacf_aes128);
+ if (cpacf_test_func(&km_functions, CPACF_KM_AES_192))
+ static_branch_enable(&have_cpacf_aes192);
+ if (cpacf_test_func(&km_functions, CPACF_KM_AES_256))
+ static_branch_enable(&have_cpacf_aes256);
+ }
+}