int i, ret;
ctx->qps = sec_create_qps();
- if (!ctx->qps) {
- pr_err("Can not create sec qps!\n");
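+	/* no hardware queue available: callers fall back to software crypto */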
+ if (!ctx->qps)
return -ENODEV;
- }
sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm);
ctx->sec = sec;
static void sec_ctx_base_uninit(struct sec_ctx *ctx)
{
int i;
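+	/* a software-only ctx has no hardware queue contexts to release */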
+ if (!ctx->qps)
+ return;
+
for (i = 0; i < ctx->sec->ctx_q_num; i++)
sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]);

	sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num);
}

static int sec_cipher_init(struct sec_ctx *ctx)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
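+	/* no DMA key buffer is allocated for a software-only ctx */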
+ if (!ctx->qps)
+ return 0;
+
c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
&c_ctx->c_key_dma, GFP_KERNEL);
	if (!c_ctx->c_key)
		return -ENOMEM;

	return 0;
}

static void sec_cipher_uninit(struct sec_ctx *ctx)
{
struct sec_cipher_ctx *c_ctx = &ctx->c_ctx;
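+	/* no DMA key buffer was allocated for a software-only ctx */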
+ if (!ctx->qps)
+ return;
+
memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE);
dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE,
c_ctx->c_key, c_ctx->c_key_dma);
}

static void sec_auth_uninit(struct sec_ctx *ctx)
{
struct sec_auth_ctx *a_ctx = &ctx->a_ctx;
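+	/* no auth key buffer was allocated for a software-only ctx */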
+ if (!ctx->qps)
+ return;
+
memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE);
dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE,
a_ctx->a_key, a_ctx->a_key_dma);
}
ret = sec_ctx_base_init(ctx);
- if (ret)
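+	/* -ENODEV means no hardware queues: continue in software-only mode */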
+ if (ret && ret != -ENODEV)
return ret;
ret = sec_cipher_init(ctx);
struct device *dev = ctx->dev;
int ret;
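+	/* without hardware queues, only the fallback tfm needs the key */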
+ if (!ctx->qps)
+ goto set_soft_key;
+
if (c_mode == SEC_CMODE_XTS) {
ret = xts_verify_key(tfm, key, keylen);
		if (ret) {
			dev_err(dev, "xts mode key err!\n");
			return ret;
		}
	}

memcpy(c_ctx->c_key, key, keylen);
- if (c_ctx->fbtfm) {
- ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
- if (ret) {
- dev_err(dev, "failed to set fallback skcipher key!\n");
- return ret;
- }
+
+set_soft_key:
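+	/* the fallback tfm is assumed to be allocated for all algs at init */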
+ ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
+ if (ret) {
+ dev_err(dev, "failed to set fallback skcipher key!\n");
+ return ret;
}
+
return 0;
}
struct crypto_authenc_keys keys;
int ret;
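+	/* hardware unavailable: key only the aead fallback tfm */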
+ if (!ctx->qps)
+ return sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
+
ctx->a_ctx.a_alg = a_alg;
ctx->c_ctx.c_alg = c_alg;
c_ctx->c_mode = c_mode;
if (ret)
return ret;
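+	/* a software-only ctx never uses the hardware request ops */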
+ if (!ctx->qps)
+ return 0;
+
	if (ctx->sec->qm.ver < QM_HW_V3) {
		ctx->type_supported = SEC_BD_TYPE2;
		ctx->req_op = &sec_skcipher_req_ops;
	} else {
		ctx->type_supported = SEC_BD_TYPE3;
		ctx->req_op = &sec_skcipher_req_ops_v3;
	}

- return ret;
+ return 0;
}
static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm)
int ret;
ret = sec_aead_init(tfm);
- if (ret) {
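+	/* -ENODEV is not fatal: requests will use the software fallback */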
+ if (ret && ret != -ENODEV) {
pr_err("hisi_sec2: aead init error!\n");
return ret;
}
int ret;
ret = sec_aead_init(tfm);
- if (ret) {
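+	/* likewise tolerate -ENODEV for the xcm variants */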
+ if (ret && ret != -ENODEV) {
dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n");
return ret;
}
bool need_fallback = false;
int ret;
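+	/* no hardware queues: hand the request to the fallback tfm */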
+ if (!ctx->qps)
+ goto soft_crypto;
+
	if (!sk_req->cryptlen) {
		if (ctx->c_ctx.c_mode == SEC_CMODE_XTS)
			return -EINVAL;
		return 0;
	}

	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
	if (unlikely(ret))
		return -EINVAL;

if (unlikely(ctx->c_ctx.fallback || need_fallback))
- return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
+ goto soft_crypto;
return ctx->req_op->process(ctx, req);
+
+soft_crypto:
+ return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
}
static int sec_skcipher_encrypt(struct skcipher_request *sk_req)
bool need_fallback = false;
int ret;
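+	/* no hardware queues: hand the request to the aead fallback */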
+ if (!ctx->qps)
+ goto soft_crypto;
+
req->flag = a_req->base.flags;
req->aead_req.aead_req = a_req;
req->c_req.encrypt = encrypt;
ret = sec_aead_param_check(ctx, req, &need_fallback);
if (unlikely(ret)) {
if (need_fallback)
- return sec_aead_soft_crypto(ctx, a_req, encrypt);
+ goto soft_crypto;
return -EINVAL;
}
return ctx->req_op->process(ctx, req);
+
+soft_crypto:
+ return sec_aead_soft_crypto(ctx, a_req, encrypt);
}
static int sec_aead_encrypt(struct aead_request *a_req)