se->base + se->hw->regs->result + (i * 4));
}
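+/*
+ * Engine-side CMAC init: allocates the two-block residue buffer and clears
+ * any stale result registers before the first update is programmed.
+ */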
+static int tegra_cmac_do_init(struct ahash_request *req)
+{
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+ int i;
+
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->task |= SHA_FIRST;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ return -ENOMEM;
+
+ rctx->residue.size = 0;
+
+ /* Clear any previous result */
+ for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
+ writel(0, se->base + se->hw->regs->result + (i * 4));
+
+ return 0;
+}
+
static int tegra_cmac_do_update(struct ahash_request *req)
{
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
struct tegra_se *se = ctx->se;
int ret = 0;
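+ /* Run the deferred init on the engine before any update/final step */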
+ if (rctx->task & SHA_INIT) {
+ ret = tegra_cmac_do_init(req);
+ if (ret)
+ goto out;
+
+ rctx->task &= ~SHA_INIT;
+ }
+
if (rctx->task & SHA_UPDATE) {
ret = tegra_cmac_do_update(req);
if (ret)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
-static int tegra_cmac_init(struct ahash_request *req)
-{
- struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
- struct tegra_se *se = ctx->se;
- int i;
-
- rctx->total_len = 0;
- rctx->datbuf.size = 0;
- rctx->residue.size = 0;
- rctx->task = SHA_FIRST;
- rctx->blk_size = crypto_ahash_blocksize(tfm);
-
- rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size * 2,
- &rctx->residue.addr, GFP_KERNEL);
- if (!rctx->residue.buf)
- return -ENOMEM;
-
- rctx->residue.size = 0;
-
- /* Clear any previous result */
- for (i = 0; i < CMAC_RESULT_REG_COUNT; i++)
- writel(0, se->base + se->hw->regs->result + (i * 4));
-
- return 0;
-}
-
static int tegra_cmac_setkey(struct crypto_ahash *tfm, const u8 *key,
unsigned int keylen)
{
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}
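+/*
+ * ahash .init entry point: only flag SHA_INIT and queue the request, so
+ * tegra_cmac_do_init() runs from the crypto engine worker.
+ */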
+static int tegra_cmac_init(struct ahash_request *req)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
+
+ rctx->task = SHA_INIT;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
static int tegra_cmac_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_cmac_ctx *ctx = crypto_ahash_ctx(tfm);
struct tegra_cmac_reqctx *rctx = ahash_request_ctx(req);
- int ret;
- ret = tegra_cmac_init(req);
- if (ret)
- return ret;
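+ /* Init is now handled by the engine via the SHA_INIT flag */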
+ rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
- rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}
se->base + se->hw->regs->result + (i * 4));
}
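+/*
+ * Engine-side SHA/HMAC init: allocates the digest and residue buffers and
+ * records the per-request algorithm, key id and block size.
+ */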
+static int tegra_sha_do_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct tegra_se *se = ctx->se;
+
+ if (ctx->fallback)
+ return tegra_sha_fallback_init(req);
+
+ rctx->total_len = 0;
+ rctx->datbuf.size = 0;
+ rctx->residue.size = 0;
+ rctx->key_id = ctx->key_id;
+ rctx->task |= SHA_FIRST;
+ rctx->alg = ctx->alg;
+ rctx->blk_size = crypto_ahash_blocksize(tfm);
+ rctx->digest.size = crypto_ahash_digestsize(tfm);
+
+ rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
+ &rctx->digest.addr, GFP_KERNEL);
+ if (!rctx->digest.buf)
+ goto digbuf_fail;
+
+ rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
+ &rctx->residue.addr, GFP_KERNEL);
+ if (!rctx->residue.buf)
+ goto resbuf_fail;
+
+ return 0;
+
+resbuf_fail:
+ dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
+ rctx->digest.addr);
+digbuf_fail:
+ return -ENOMEM;
+}
+
static int tegra_sha_do_update(struct ahash_request *req)
{
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req));
struct tegra_se *se = ctx->se;
int ret = 0;
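+ /* Run the deferred init on the engine before any update/final step */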
+ if (rctx->task & SHA_INIT) {
+ ret = tegra_sha_do_init(req);
+ if (ret)
+ goto out;
+
+ rctx->task &= ~SHA_INIT;
+ }
+
if (rctx->task & SHA_UPDATE) {
ret = tegra_sha_do_update(req);
if (ret)
tegra_key_invalidate(ctx->se, ctx->key_id, ctx->alg);
}
-static int tegra_sha_init(struct ahash_request *req)
-{
- struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
- struct tegra_se *se = ctx->se;
-
- if (ctx->fallback)
- return tegra_sha_fallback_init(req);
-
- rctx->total_len = 0;
- rctx->datbuf.size = 0;
- rctx->residue.size = 0;
- rctx->key_id = ctx->key_id;
- rctx->task = SHA_FIRST;
- rctx->alg = ctx->alg;
- rctx->blk_size = crypto_ahash_blocksize(tfm);
- rctx->digest.size = crypto_ahash_digestsize(tfm);
-
- rctx->digest.buf = dma_alloc_coherent(se->dev, rctx->digest.size,
- &rctx->digest.addr, GFP_KERNEL);
- if (!rctx->digest.buf)
- goto digbuf_fail;
-
- rctx->residue.buf = dma_alloc_coherent(se->dev, rctx->blk_size,
- &rctx->residue.addr, GFP_KERNEL);
- if (!rctx->residue.buf)
- goto resbuf_fail;
-
- return 0;
-
-resbuf_fail:
- dma_free_coherent(se->dev, rctx->digest.size, rctx->digest.buf,
- rctx->digest.addr);
-digbuf_fail:
- return -ENOMEM;
-}
-
static int tegra_hmac_fallback_setkey(struct tegra_sha_ctx *ctx, const u8 *key,
unsigned int keylen)
{
return tegra_key_submit(ctx->se, key, keylen, ctx->alg, &ctx->key_id);
}
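+/*
+ * ahash .init entry point: only flag SHA_INIT and queue the request, so
+ * tegra_sha_do_init() runs from the crypto engine worker.
+ */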
+static int tegra_sha_init(struct ahash_request *req)
+{
+ struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
+
+ rctx->task = SHA_INIT;
+
+ return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
+}
+
static int tegra_sha_digest(struct ahash_request *req)
{
struct tegra_sha_reqctx *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct tegra_sha_ctx *ctx = crypto_ahash_ctx(tfm);
- int ret;
if (ctx->fallback)
return tegra_sha_fallback_digest(req);
- ret = tegra_sha_init(req);
- if (ret)
- return ret;
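+ /* Init is now handled by the engine via the SHA_INIT flag */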
+ rctx->task |= SHA_INIT | SHA_UPDATE | SHA_FINAL;
- rctx->task |= SHA_UPDATE | SHA_FINAL;
return crypto_transfer_hash_request_to_engine(ctx->se->engine, req);
}