From 280bfc3eb5d3f3199f888197462e7da7e9914160 Mon Sep 17 00:00:00 2001
From: Harsh Jain
Date: Sat, 20 Dec 2025 21:29:05 +0530
Subject: [PATCH] crypto: xilinx - Add gcm(aes) support for AMD/Xilinx Versal
 device

Add gcm(aes) algorithm support for AMD/Xilinx Versal devices. Two
instances are registered: gcm(aes), which takes a software key (or a
volatile user key slot) and falls back to a software implementation
for requests the hardware cannot handle, and gcm(paes), which accepts
hardware-backed key sources only. While at it, convert the fallback
allocation error message from pr_err() to dev_err() and drop a
duplicated return statement from the probe path.

Signed-off-by: Harsh Jain
Signed-off-by: Herbert Xu
---
 drivers/crypto/xilinx/zynqmp-aes-gcm.c | 430 ++++++++++++++++++++++++-
 1 file changed, 421 insertions(+), 9 deletions(-)

diff --git a/drivers/crypto/xilinx/zynqmp-aes-gcm.c b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
index 80b0ace7b5caa..53c9cb2224bd5 100644
--- a/drivers/crypto/xilinx/zynqmp-aes-gcm.c
+++ b/drivers/crypto/xilinx/zynqmp-aes-gcm.c
@@ -1,7 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Xilinx ZynqMP AES Driver.
- * Copyright (c) 2020 Xilinx Inc.
+ * Copyright (C) 2020-2022 Xilinx Inc.
+ * Copyright (C) 2022-2025 Advanced Micro Devices, Inc.
  */
 
 #include
@@ -19,11 +20,13 @@
 #include
 
 #define ZYNQMP_DMA_BIT_MASK		32U
+#define VERSAL_DMA_BIT_MASK		64U
 
 #define XILINX_AES_AUTH_SIZE		16U
 #define XILINX_AES_BLK_SIZE		1U
 #define ZYNQMP_AES_MIN_INPUT_BLK_SIZE	4U
 #define ZYNQMP_AES_WORD_LEN		4U
+#define VERSAL_AES_QWORD_LEN		16U
 #define ZYNQMP_AES_GCM_TAG_MISMATCH_ERR	0x01
 #define ZYNQMP_AES_WRONG_KEY_SRC_ERR	0x13
 #define ZYNQMP_AES_PUF_NOT_PROGRAMMED	0xE300
@@ -49,6 +52,7 @@ struct xilinx_aead_dev {
 struct xilinx_aead_alg {
 	struct xilinx_aead_dev *aead_dev;
 	struct aead_engine_alg aead;
+	int (*aes_aead_cipher)(struct aead_request *areq);
 	u8 dma_bit_mask;
 };
 
@@ -83,6 +87,54 @@ struct xilinx_aead_req_ctx {
 
 static struct xilinx_aead_dev *aead_dev;
 
+enum versal_aead_keysrc {
+	VERSAL_AES_BBRAM_KEY = 0,
+	VERSAL_AES_BBRAM_RED_KEY,
+	VERSAL_AES_BH_KEY,
+	VERSAL_AES_BH_RED_KEY,
+	VERSAL_AES_EFUSE_KEY,
+	VERSAL_AES_EFUSE_RED_KEY,
+	VERSAL_AES_EFUSE_USER_KEY_0,
+	VERSAL_AES_EFUSE_USER_KEY_1,
+	VERSAL_AES_EFUSE_USER_RED_KEY_0,
+	VERSAL_AES_EFUSE_USER_RED_KEY_1,
+	VERSAL_AES_KUP_KEY,
+	VERSAL_AES_PUF_KEY,
+	VERSAL_AES_USER_KEY_0,
+	VERSAL_AES_USER_KEY_1,
+	VERSAL_AES_USER_KEY_2,
+	VERSAL_AES_USER_KEY_3,
+	VERSAL_AES_USER_KEY_4,
+	VERSAL_AES_USER_KEY_5,
+	VERSAL_AES_USER_KEY_6,
+	VERSAL_AES_USER_KEY_7,
+	VERSAL_AES_EXPANDED_KEYS,
+	VERSAL_AES_ALL_KEYS,
+};
+
+enum versal_aead_op {
+	VERSAL_AES_ENCRYPT = 0,
+	VERSAL_AES_DECRYPT
+};
+
+enum versal_aes_keysize {
+	HW_AES_KEY_SIZE_128 = 0,
+	HW_AES_KEY_SIZE_256 = 2,
+};
+
+struct versal_init_ops {
+	u64 iv;
+	u32 op;
+	u32 keysrc;
+	u32 size;
+};
+
+struct versal_in_params {
+	u64 in_data_addr;
+	u32 size;
+	u32 is_last;
+};
+
 static int zynqmp_aes_aead_cipher(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
@@ -187,6 +239,141 @@ freemem:
 	return ret;
 }
 
+static int versal_aes_aead_cipher(struct aead_request *req)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+	struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+	dma_addr_t dma_addr_data, dma_addr_hw_req, dma_addr_in;
+	u32 total_len = req->assoclen + req->cryptlen;
+	struct device *dev = tfm_ctx->dev;
+	struct versal_init_ops *hwreq;
+	struct versal_in_params *in;
+	u32 gcm_offset, out_len;
+	size_t dmabuf_size;
+	size_t kbuf_size;
+	void *dmabuf;
+	char *kbuf;
+	int ret;
+
+	kbuf_size = total_len + XILINX_AES_AUTH_SIZE;
+	kbuf = kmalloc(kbuf_size, GFP_KERNEL);
+	if (unlikely(!kbuf)) {
+		ret = -ENOMEM;
+		goto err;
+	}
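+
+	/*
+	 * The hardware request travels to the PM firmware in one
+	 * contiguous DMA buffer: struct versal_init_ops, then
+	 * struct versal_in_params, then the GCM IV. The offset
+	 * arithmetic below depends on this layout.
+	 */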
+	dmabuf_size = sizeof(struct versal_init_ops) +
+		      sizeof(struct versal_in_params) +
+		      GCM_AES_IV_SIZE;
+	dmabuf = kmalloc(dmabuf_size, GFP_KERNEL);
+	if (unlikely(!dmabuf)) {
+		ret = -ENOMEM;
+		goto buf1_free;
+	}
+
+	dma_addr_hw_req = dma_map_single(dev, dmabuf, dmabuf_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, dma_addr_hw_req))) {
+		ret = -ENOMEM;
+		goto buf2_free;
+	}
+
+	scatterwalk_map_and_copy(kbuf, req->src, 0, total_len, 0);
+	dma_addr_data = dma_map_single(dev, kbuf, kbuf_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_mapping_error(dev, dma_addr_data))) {
+		dma_unmap_single(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+		ret = -ENOMEM;
+		goto buf2_free;
+	}
+
+	hwreq = dmabuf;
+	in = dmabuf + sizeof(struct versal_init_ops);
+	memcpy(dmabuf + sizeof(struct versal_init_ops) +
+	       sizeof(struct versal_in_params), req->iv, GCM_AES_IV_SIZE);
+	hwreq->iv = dma_addr_hw_req + sizeof(struct versal_init_ops) +
+		    sizeof(struct versal_in_params);
+	hwreq->keysrc = tfm_ctx->keysrc;
+	dma_addr_in = dma_addr_hw_req + sizeof(struct versal_init_ops);
+	if (rq_ctx->op == XILINX_AES_ENCRYPT) {
+		hwreq->op = VERSAL_AES_ENCRYPT;
+		out_len = total_len + crypto_aead_authsize(aead);
+		in->size = req->cryptlen;
+	} else {
+		hwreq->op = VERSAL_AES_DECRYPT;
+		out_len = total_len - XILINX_AES_AUTH_SIZE;
+		in->size = req->cryptlen - XILINX_AES_AUTH_SIZE;
+	}
+
+	if (tfm_ctx->keylen == AES_KEYSIZE_128)
+		hwreq->size = HW_AES_KEY_SIZE_128;
+	else
+		hwreq->size = HW_AES_KEY_SIZE_256;
+
+	/* Request aes key write for volatile user keys */
+	if (hwreq->keysrc >= VERSAL_AES_USER_KEY_0 && hwreq->keysrc <= VERSAL_AES_USER_KEY_7) {
+		ret = versal_pm_aes_key_write(hwreq->size, hwreq->keysrc,
+					      tfm_ctx->key_dma_addr);
+		if (ret)
+			goto unmap;
+	}
+
+	in->in_data_addr = dma_addr_data + req->assoclen;
+	in->is_last = 1;
+	gcm_offset = req->assoclen + in->size;
+	dma_sync_single_for_device(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+	ret = versal_pm_aes_op_init(dma_addr_hw_req);
+	if (ret)
+		goto clearkey;
+
+	if (req->assoclen > 0) {
+		/* Currently GMAC is OFF by default */
+		ret = versal_pm_aes_update_aad(dma_addr_data, req->assoclen);
+		if (ret)
+			goto clearkey;
+	}
+
+	if (rq_ctx->op == XILINX_AES_ENCRYPT) {
+		ret = versal_pm_aes_enc_update(dma_addr_in,
+					       dma_addr_data + req->assoclen);
+		if (ret)
+			goto clearkey;
+
+		ret = versal_pm_aes_enc_final(dma_addr_data + gcm_offset);
+		if (ret)
+			goto clearkey;
+	} else {
+		ret = versal_pm_aes_dec_update(dma_addr_in,
+					       dma_addr_data + req->assoclen);
+		if (ret)
+			goto clearkey;
+
+		ret = versal_pm_aes_dec_final(dma_addr_data + gcm_offset);
+		if (ret) {
+			ret = -EBADMSG;
+			goto clearkey;
+		}
+	}
+
+	dma_unmap_single(dev, dma_addr_data, kbuf_size, DMA_BIDIRECTIONAL);
+	dma_unmap_single(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+	sg_copy_from_buffer(req->dst, sg_nents(req->dst),
+			    kbuf, out_len);
+	dma_addr_data = 0;
+	dma_addr_hw_req = 0;
+
+clearkey:
+	if (hwreq->keysrc >= VERSAL_AES_USER_KEY_0 && hwreq->keysrc <= VERSAL_AES_USER_KEY_7)
+		versal_pm_aes_key_zero(hwreq->keysrc);
+
+unmap:
+	if (unlikely(dma_addr_data))
+		dma_unmap_single(dev, dma_addr_data, kbuf_size, DMA_BIDIRECTIONAL);
+	if (unlikely(dma_addr_hw_req))
+		dma_unmap_single(dev, dma_addr_hw_req, dmabuf_size, DMA_BIDIRECTIONAL);
+
+buf2_free:
+	memzero_explicit(dmabuf, dmabuf_size);
+	kfree(dmabuf);
+
+buf1_free:
+	memzero_explicit(kbuf, kbuf_size);
+	kfree(kbuf);
+
+err:
+	return ret;
+}
+
 static int zynqmp_fallback_check(struct xilinx_aead_tfm_ctx *tfm_ctx,
 				 struct aead_request *req)
 {
@@ -212,13 +399,40 @@ static int zynqmp_fallback_check(struct xilinx_aead_tfm_ctx *tfm_ctx,
 	return 0;
 }
 
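+/*
+ * Return 1 when the request must not go to the hardware: non-16-byte
+ * auth tags on decrypt, AES-192 keys, payloads shorter than the minimum
+ * input block or not word-aligned, AAD that is not 16-byte aligned, or
+ * decrypt payloads no longer than the tag itself.
+ */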
+static int versal_fallback_check(struct xilinx_aead_tfm_ctx *tfm_ctx,
+				 struct aead_request *req)
+{
+	struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+
+	if (tfm_ctx->authsize != XILINX_AES_AUTH_SIZE && rq_ctx->op == XILINX_AES_DECRYPT)
+		return 1;
+
+	if (tfm_ctx->keylen == AES_KEYSIZE_192)
+		return 1;
+
+	if (req->cryptlen < ZYNQMP_AES_MIN_INPUT_BLK_SIZE ||
+	    req->cryptlen % ZYNQMP_AES_WORD_LEN ||
+	    req->assoclen % VERSAL_AES_QWORD_LEN)
+		return 1;
+
+	if (rq_ctx->op == XILINX_AES_DECRYPT &&
+	    req->cryptlen <= XILINX_AES_AUTH_SIZE)
+		return 1;
+
+	return 0;
+}
+
 static int xilinx_handle_aes_req(struct crypto_engine *engine, void *req)
 {
 	struct aead_request *areq = container_of(req, struct aead_request, base);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct xilinx_aead_alg *drv_ctx;
 	int err;
 
-	err = zynqmp_aes_aead_cipher(areq);
+	drv_ctx = container_of(alg, struct xilinx_aead_alg, aead.base);
+	err = drv_ctx->aes_aead_cipher(areq);
 	local_bh_disable();
 	crypto_finalize_aead_request(engine, areq, err);
 	local_bh_enable();
@@ -278,6 +492,84 @@ static int zynqmp_paes_aead_setkey(struct crypto_aead *aead, const u8 *key,
 	return err;
 }
 
+static int versal_aes_aead_setkey(struct crypto_aead *aead, const u8 *key,
+				  unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+	struct xilinx_hwkey_info hwkey;
+	unsigned char keysrc;
+	int err;
+
+	tfm_ctx->keysrc = VERSAL_AES_USER_KEY_0;
+	if (keylen == sizeof(struct xilinx_hwkey_info)) {
+		memcpy(&hwkey, key, sizeof(struct xilinx_hwkey_info));
+		if (hwkey.magic != XILINX_KEY_MAGIC)
+			return -EINVAL;
+
+		keysrc = hwkey.type;
+		if (keysrc >= VERSAL_AES_USER_KEY_1 &&
+		    keysrc <= VERSAL_AES_USER_KEY_7) {
+			tfm_ctx->keysrc = keysrc;
+			tfm_ctx->keylen = sizeof(struct xilinx_hwkey_info);
+			return 0;
+		}
+		return -EINVAL;
+	}
+
+	if (keylen == AES_KEYSIZE_256 || keylen == AES_KEYSIZE_128) {
+		tfm_ctx->keylen = keylen;
+		memcpy(tfm_ctx->key, key, keylen);
+		dma_sync_single_for_device(tfm_ctx->dev, tfm_ctx->key_dma_addr,
+					   AES_KEYSIZE_256,
+					   DMA_TO_DEVICE);
+	}
+
+	tfm_ctx->fbk_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	tfm_ctx->fbk_cipher->base.crt_flags |= (aead->base.crt_flags &
+						CRYPTO_TFM_REQ_MASK);
+	err = crypto_aead_setkey(tfm_ctx->fbk_cipher, key, keylen);
+	if (!err)
+		tfm_ctx->keylen = keylen;
+
+	return err;
+}
+
+static int versal_paes_aead_setkey(struct crypto_aead *aead, const u8 *key,
+				   unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_tfm_ctx(tfm);
+	struct xilinx_hwkey_info hwkey;
+	unsigned char keysrc;
+	int err = 0;
+
+	if (keylen != sizeof(struct xilinx_hwkey_info))
+		return -EINVAL;
+
+	memcpy(&hwkey, key, sizeof(struct xilinx_hwkey_info));
+	if (hwkey.magic != XILINX_KEY_MAGIC)
+		return -EINVAL;
+
+	keysrc = hwkey.type;
+
+	switch (keysrc) {
+	case VERSAL_AES_EFUSE_USER_KEY_0:
+	case VERSAL_AES_EFUSE_USER_KEY_1:
+	case VERSAL_AES_EFUSE_USER_RED_KEY_0:
+	case VERSAL_AES_EFUSE_USER_RED_KEY_1:
+	case VERSAL_AES_PUF_KEY:
+		tfm_ctx->keysrc = keysrc;
+		tfm_ctx->keylen = sizeof(struct xilinx_hwkey_info);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
 static int xilinx_aes_aead_setauthsize(struct crypto_aead *aead,
 				       unsigned int authsize)
 {
@@ -328,6 +620,31 @@ static int zynqmp_aes_aead_encrypt(struct aead_request *req)
 	return crypto_transfer_aead_request_to_engine(drv_ctx->aead_dev->engine, req);
 }
 
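+/*
+ * Shared entry checks for the Versal encrypt/decrypt paths: a volatile
+ * user key slot with no key material loaded yet (keylen still holds
+ * sizeof(struct xilinx_hwkey_info)) is rejected, and requests that fail
+ * versal_fallback_check() get -EOPNOTSUPP for hardware-backed key
+ * sources, since the software fallback cannot access those keys.
+ */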
+static int versal_aes_aead_encrypt(struct aead_request *req)
+{
+	struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct xilinx_aead_alg *drv_ctx;
+	int err;
+
+	drv_ctx = container_of(alg, struct xilinx_aead_alg, aead.base);
+	rq_ctx->op = XILINX_AES_ENCRYPT;
+	if (tfm_ctx->keysrc >= VERSAL_AES_USER_KEY_0 &&
+	    tfm_ctx->keysrc <= VERSAL_AES_USER_KEY_7 &&
+	    tfm_ctx->keylen == sizeof(struct xilinx_hwkey_info))
+		return -EINVAL;
+	err = versal_fallback_check(tfm_ctx, req);
+	if (err && (tfm_ctx->keysrc < VERSAL_AES_USER_KEY_0 ||
+		    tfm_ctx->keysrc > VERSAL_AES_USER_KEY_7))
+		return -EOPNOTSUPP;
+	if (err)
+		return xilinx_aes_fallback_crypt(req, true);
+
+	return crypto_transfer_aead_request_to_engine(drv_ctx->aead_dev->engine, req);
+}
+
 static int zynqmp_aes_aead_decrypt(struct aead_request *req)
 {
 	struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
@@ -368,6 +685,33 @@ static int xilinx_paes_aead_init(struct crypto_aead *aead)
 	return 0;
 }
 
+static int versal_aes_aead_decrypt(struct aead_request *req)
+{
+	struct xilinx_aead_req_ctx *rq_ctx = aead_request_ctx(req);
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct xilinx_aead_tfm_ctx *tfm_ctx = crypto_aead_ctx(aead);
+	struct aead_alg *alg = crypto_aead_alg(aead);
+	struct xilinx_aead_alg *drv_ctx;
+	int err;
+
+	drv_ctx = container_of(alg, struct xilinx_aead_alg, aead.base);
+	rq_ctx->op = XILINX_AES_DECRYPT;
+	if (tfm_ctx->keysrc >= VERSAL_AES_USER_KEY_0 &&
+	    tfm_ctx->keysrc <= VERSAL_AES_USER_KEY_7 &&
+	    tfm_ctx->keylen == sizeof(struct xilinx_hwkey_info))
+		return -EINVAL;
+
+	err = versal_fallback_check(tfm_ctx, req);
+	if (err &&
+	    (tfm_ctx->keysrc < VERSAL_AES_USER_KEY_0 ||
+	     tfm_ctx->keysrc > VERSAL_AES_USER_KEY_7))
+		return -EOPNOTSUPP;
+	if (err)
+		return xilinx_aes_fallback_crypt(req, false);
+
+	return crypto_transfer_aead_request_to_engine(drv_ctx->aead_dev->engine, req);
+}
+
 static int xilinx_aes_aead_init(struct crypto_aead *aead)
 {
 	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
@@ -384,8 +728,8 @@ static int xilinx_aes_aead_init(struct crypto_aead *aead)
 			CRYPTO_ALG_NEED_FALLBACK);
 
 	if (IS_ERR(tfm_ctx->fbk_cipher)) {
-		pr_err("%s() Error: failed to allocate fallback for %s\n",
-		       __func__, drv_ctx->aead.base.base.cra_name);
+		dev_err(tfm_ctx->dev, "failed to allocate fallback for %s\n",
+			drv_ctx->aead.base.base.cra_name);
 		return PTR_ERR(tfm_ctx->fbk_cipher);
 	}
 	tfm_ctx->key = kmalloc(AES_KEYSIZE_256, GFP_KERNEL);
@@ -433,6 +777,7 @@ static void xilinx_aes_aead_exit(struct crypto_aead *aead)
 
 static struct xilinx_aead_alg zynqmp_aes_algs[] = {
 	{
+		.aes_aead_cipher = zynqmp_aes_aead_cipher,
 		.aead.base = {
 			.setkey = zynqmp_aes_aead_setkey,
 			.setauthsize = xilinx_aes_aead_setauthsize,
@@ -462,6 +807,7 @@ static struct xilinx_aead_alg zynqmp_aes_algs[] = {
 		.dma_bit_mask = ZYNQMP_DMA_BIT_MASK,
 	},
 	{
+		.aes_aead_cipher = zynqmp_aes_aead_cipher,
 		.aead.base = {
 			.setkey = zynqmp_paes_aead_setkey,
 			.setauthsize = xilinx_aes_aead_setauthsize,
@@ -492,12 +838,80 @@ static struct xilinx_aead_alg zynqmp_aes_algs[] = {
 	{ /* sentinel */ }
 };
 
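+/*
+ * Two Versal instances are registered: "gcm(aes)" accepts software keys
+ * (or a volatile user key slot) and keeps CRYPTO_ALG_NEED_FALLBACK for
+ * request shapes the hardware cannot handle; "gcm(paes)" takes hardware
+ * key sources only and registers without a fallback.
+ */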
+static struct xilinx_aead_alg versal_aes_algs[] = {
+	{
+		.aes_aead_cipher = versal_aes_aead_cipher,
+		.aead.base = {
+			.setkey = versal_aes_aead_setkey,
+			.setauthsize = xilinx_aes_aead_setauthsize,
+			.encrypt = versal_aes_aead_encrypt,
+			.decrypt = versal_aes_aead_decrypt,
+			.init = xilinx_aes_aead_init,
+			.exit = xilinx_aes_aead_exit,
+			.ivsize = GCM_AES_IV_SIZE,
+			.maxauthsize = XILINX_AES_AUTH_SIZE,
+			.base = {
+				.cra_name = "gcm(aes)",
+				.cra_driver_name = "versal-aes-gcm",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
+					     CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY |
+					     CRYPTO_ALG_NEED_FALLBACK,
+				.cra_blocksize = XILINX_AES_BLK_SIZE,
+				.cra_ctxsize = sizeof(struct xilinx_aead_tfm_ctx),
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.aead.op = {
+			.do_one_request = xilinx_handle_aes_req,
+		},
+		.dma_bit_mask = VERSAL_DMA_BIT_MASK,
+	},
+	{
+		.aes_aead_cipher = versal_aes_aead_cipher,
+		.aead.base = {
+			.setkey = versal_paes_aead_setkey,
+			.setauthsize = xilinx_aes_aead_setauthsize,
+			.encrypt = versal_aes_aead_encrypt,
+			.decrypt = versal_aes_aead_decrypt,
+			.init = xilinx_paes_aead_init,
+			.exit = xilinx_paes_aead_exit,
+			.ivsize = GCM_AES_IV_SIZE,
+			.maxauthsize = XILINX_AES_AUTH_SIZE,
+			.base = {
+				.cra_name = "gcm(paes)",
+				.cra_driver_name = "versal-paes-gcm",
+				.cra_priority = 300,
+				.cra_flags = CRYPTO_ALG_TYPE_AEAD |
+					     CRYPTO_ALG_ASYNC |
+					     CRYPTO_ALG_ALLOCATES_MEMORY |
+					     CRYPTO_ALG_KERN_DRIVER_ONLY,
+				.cra_blocksize = XILINX_AES_BLK_SIZE,
+				.cra_ctxsize = sizeof(struct xilinx_aead_tfm_ctx),
+				.cra_module = THIS_MODULE,
+			}
+		},
+		.aead.op = {
+			.do_one_request = xilinx_handle_aes_req,
+		},
+		.dma_bit_mask = VERSAL_DMA_BIT_MASK,
+	},
+	{ /* sentinel */ }
+};
+
 static struct xlnx_feature aes_feature_map[] = {
 	{
 		.family = PM_ZYNQMP_FAMILY_CODE,
 		.feature_id = PM_SECURE_AES,
 		.data = zynqmp_aes_algs,
 	},
+	{
+		.family = PM_VERSAL_FAMILY_CODE,
+		.feature_id = XSECURE_API_AES_OP_INIT,
+		.data = versal_aes_algs,
+	},
 	{ /* sentinel */ }
 };
 
@@ -534,14 +948,13 @@ static int xilinx_aes_aead_probe(struct platform_device *pdev)
 	aead_dev->engine = crypto_engine_alloc_init(dev, 1);
 	if (!aead_dev->engine) {
 		dev_err(dev, "Cannot alloc AES engine\n");
-		err = -ENOMEM;
-		goto err_engine;
+		return -ENOMEM;
 	}
 
 	err = crypto_engine_start(aead_dev->engine);
 	if (err) {
 		dev_err(dev, "Cannot start AES engine\n");
-		goto err_engine;
+		goto err_engine_start;
 	}
 
 	for (i = 0; aead_dev->aead_algs[i].dma_bit_mask; i++) {
@@ -552,14 +965,13 @@
 			goto err_alg_register;
 		}
 	}
-	return 0;
 	return 0;
 
 err_alg_register:
 	while (i > 0)
 		crypto_engine_unregister_aead(&aead_dev->aead_algs[--i].aead);
 
-err_engine:
+err_engine_start:
 	crypto_engine_exit(aead_dev->engine);
 	return err;
-- 
2.47.3