return false;
}
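
Before the deleted hunks below, it is worth recalling how accumulative mode spots the guest's final request: has_padding(), still called from do_hash_operation() further down, looks for standard SHA-family (Merkle-Damgard) padding at the tail of the request buffer. A self-contained sketch of that idea (illustrative only; looks_like_md_padding() is a hypothetical name, and ldq_be_p() is QEMU's big-endian 64-bit load helper from "qemu/bswap.h"):

/*
 * Illustrative sketch (not from the patch): a final request ends with
 * Merkle-Damgard padding, i.e. a 0x80 byte, zero fill, and the total
 * message length in bits stored big-endian in the last 8 bytes.
 */
static bool looks_like_md_padding(const uint8_t *buf, size_t req_len,
                                  uint64_t total_req_len)
{
    uint64_t msg_bytes = ldq_be_p(buf + req_len - 8) / 8; /* bits -> bytes */
    uint64_t pad_size;

    if (msg_bytes > total_req_len) {
        return false;                 /* length field is not plausible */
    }
    pad_size = total_req_len - msg_bytes;
    if (pad_size == 0 || pad_size > req_len) {
        return false;                 /* padding must fit in this request */
    }
    return buf[req_len - pad_size] == 0x80; /* padding starts with 0x80 */
}
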
-static int reconstruct_iov(AspeedHACEState *s, struct iovec *iov, int id,
- uint32_t *pad_offset)
-{
- int i, iov_count;
- if (*pad_offset != 0) {
- s->iov_cache[s->iov_count].iov_base = iov[id].iov_base;
- s->iov_cache[s->iov_count].iov_len = *pad_offset;
- ++s->iov_count;
- }
- for (i = 0; i < s->iov_count; i++) {
- iov[i].iov_base = s->iov_cache[i].iov_base;
- iov[i].iov_len = s->iov_cache[i].iov_len;
- }
- iov_count = s->iov_count;
- s->iov_count = 0;
- s->total_req_len = 0;
- return iov_count;
-}
-
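
The reason reconstruct_iov() and the iov_cache bookkeeping can be deleted wholesale is that the hash backend now keeps running digest state across requests, so buffered iovecs never have to be replayed into a one-shot hash at finalization time. A rough sketch of the flow the device moves to, assuming QEMU's accumulative qcrypto API (qcrypto_hash_new(), qcrypto_hash_updatev(), qcrypto_hash_finalize_bytes()); local_err, digest_len and is_final are illustrative locals, not taken from the patch:

/* Sketch: accumulate each scatter-gather request into s->hash_ctx. */
if (!s->hash_ctx) {
    s->hash_ctx = qcrypto_hash_new(algo, &local_err);  /* first chunk */
    if (!s->hash_ctx) {
        return;                      /* report local_err and bail out */
    }
}
if (qcrypto_hash_updatev(s->hash_ctx, iov, i, &local_err) < 0) {
    return;                          /* report local_err and bail out */
}
if (is_final) {                      /* this request carried the padding */
    qcrypto_hash_finalize_bytes(s->hash_ctx, &digest_buf, &digest_len,
                                &local_err);
    qcrypto_hash_free(s->hash_ctx);
    s->hash_ctx = NULL;
}

The acc_mode cleanup kept below (qcrypto_hash_free() plus resetting total_req_len) is the tail end of exactly this pattern.
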
static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode,
bool acc_mode)
{
iov[0].iov_base = haddr;
iov[0].iov_len = len;
i = 1;
-
- if (s->iov_count) {
- /*
- * In aspeed sdk kernel driver, sg_mode is disabled in hash_final().
- * Thus if we received a request with sg_mode disabled, it is
- * required to check whether cache is empty. If no, we should
- * combine cached iov and the current iov.
- */
- s->total_req_len += len;
- if (has_padding(s, iov, len, &total_msg_len, &pad_offset)) {
- i = reconstruct_iov(s, iov, 0, &pad_offset);
- }
- }
}
if (acc_mode) {
qcrypto_hash_free(s->hash_ctx);
s->hash_ctx = NULL;
- s->iov_count = 0;
s->total_req_len = 0;
}
} else if (qcrypto_hash_bytesv(algo, iov, i, &digest_buf,
}
memset(s->regs, 0, sizeof(s->regs));
- s->iov_count = 0;
s->total_req_len = 0;
}
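
Note that the reset hunk shown here only clears the registers and total_req_len; disposing of a live hash_ctx is not visible in this excerpt. Purely as an illustration of a defensive reset under that assumption (the shape of aspeed_hace_reset() is sketched here, not quoted from the patch):

static void aspeed_hace_reset(DeviceState *dev)
{
    AspeedHACEState *s = ASPEED_HACE(dev);

    /* Drop any half-accumulated digest state (illustrative). */
    if (s->hash_ctx) {
        qcrypto_hash_free(s->hash_ctx);
        s->hash_ctx = NULL;
    }
    memset(s->regs, 0, sizeof(s->regs));
    s->total_req_len = 0;
}
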
static const VMStateDescription vmstate_aspeed_hace = {
.name = TYPE_ASPEED_HACE,
- .version_id = 1,
- .minimum_version_id = 1,
+ .version_id = 2,
+ .minimum_version_id = 2,
.fields = (const VMStateField[]) {
VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
VMSTATE_UINT32(total_req_len, AspeedHACEState),
- VMSTATE_UINT32(iov_count, AspeedHACEState),
VMSTATE_END_OF_LIST(),
}
};
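
Dropping iov_count from the migration stream is an incompatible wire-format change, which is why both version_id and minimum_version_id move to 2: an incoming stream recorded with the old layout is refused cleanly instead of being misparsed. Had loading version 1 streams been worth preserving, the usual alternative is to keep the dead field's bytes on the wire as padding; a hedged sketch, assuming the standard VMSTATE_UNUSED() helper:

.version_id = 1,
.minimum_version_id = 1,
.fields = (const VMStateField[]) {
    VMSTATE_UINT32_ARRAY(regs, AspeedHACEState, ASPEED_HACE_NR_REGS),
    VMSTATE_UINT32(total_req_len, AspeedHACEState),
    VMSTATE_UNUSED(4), /* was VMSTATE_UINT32(iov_count, ...) */
    VMSTATE_END_OF_LIST(),
}

Since the cached-iov state was transient per-request bookkeeping rather than guest-visible state, rejecting old streams outright is the simpler choice.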