}
if (walk.nbytes) {
u8 __aligned(8) tail[AES_BLOCK_SIZE];
+ const u8 *tsrc = walk.src.virt.addr;
unsigned int nbytes = walk.nbytes;
u8 *tdst = walk.dst.virt.addr;
- u8 *tsrc = walk.src.virt.addr;
/*
* Tell aes_ctr_encrypt() to process a tail block.

struct skcipher_walk walk;
int nbytes, err;
int first = 1;
- u8 *out, *in;
+ const u8 *in;
+ u8 *out;
if (req->cryptlen < AES_BLOCK_SIZE)
return -EINVAL;
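
The hunks in this patch mostly apply one mechanical change: local pointers
that only ever read from walk.src.virt.addr become const u8 *, with some
declarations reordered in passing. A minimal userspace sketch of the
pattern follows; fake_walk, xor_walk, and the field layout are
illustrative stand-ins, not kernel API.

#include <stdint.h>

typedef uint8_t u8;

/* Mirrors the shape of skcipher_walk; illustrative only, not kernel API. */
struct fake_walk {
	struct { struct { const void *addr; } virt; } src;
	struct { struct { void *addr; } virt; } dst;
	unsigned int nbytes;
};

/*
 * The source is only ever read, so a const pointer documents and
 * enforces that; the destination still needs a writable pointer.
 */
static void xor_walk(struct fake_walk *walk, const u8 *keystream)
{
	const u8 *src = walk->src.virt.addr;	/* read-only view */
	u8 *dst = walk->dst.virt.addr;		/* writable output */

	for (unsigned int i = 0; i < walk->nbytes; i++)
		dst[i] = src[i] ^ keystream[i];
}

int main(void)
{
	u8 in[4] = { 'a', 'b', 'c', 0 };
	u8 out[4];
	u8 ks[4] = { 1, 2, 3, 4 };
	struct fake_walk w = { { { in } }, { { out } }, sizeof(in) };

	xor_walk(&w, ks);
	return out[0] == ('a' ^ 1) ? 0 : 1;
}
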
asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
void *key);
asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
-asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
+asmlinkage void aes_p10_gcm_encrypt(const u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
-asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
+asmlinkage void aes_p10_gcm_decrypt(const u8 *in, u8 *out, size_t len,
void *rkey, u8 *iv, void *Xi);
asmlinkage void gcm_init_htable(unsigned char htable[], unsigned char Xi[]);
asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
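
The P10 GCM routines are implemented in assembly, so only their C
prototypes change: the code never writes through in, and const-qualifying
a pointer parameter is ABI-neutral; it simply lets read-only buffers be
passed without casts. A standalone sketch, where encrypt_blocks is a
hypothetical stand-in for aes_p10_gcm_encrypt():

#include <stddef.h>
#include <stdint.h>

typedef uint8_t u8;

/*
 * Stand-in for an asm routine such as aes_p10_gcm_encrypt(); the body is
 * a stub, what matters is the const-qualified prototype.
 */
static void encrypt_blocks(const u8 *in, u8 *out, size_t len)
{
	for (size_t i = 0; i < len; i++)
		out[i] = in[i] ^ 0xAA;	/* placeholder transform */
}

int main(void)
{
	static const u8 ro_input[16] = { 0 };	/* read-only buffer */
	u8 rw_input[16] = { 0 };
	u8 out[16];

	/*
	 * With "const u8 *in", both read-only and writable buffers are
	 * accepted without casts; the generated ABI is unchanged.
	 */
	encrypt_blocks(ro_input, out, sizeof(out));
	encrypt_blocks(rw_input, out, sizeof(out));
	return 0;
}
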
return ret;
while ((nbytes = walk.nbytes) > 0 && ret == 0) {
- u8 *src = walk.src.virt.addr;
+ const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
u8 buf[AES_BLOCK_SIZE];

static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx,
struct skcipher_walk *walk)
{
+ const u8 *src = walk->src.virt.addr;
u8 *ctrblk = walk->iv;
u8 keystream[AES_BLOCK_SIZE];
- u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;

{
u8 *ctrblk = walk->iv;
u64 keystream[AES_BLOCK_SIZE / sizeof(u64)];
- u8 *src = walk->src.virt.addr;
+ const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;

err = skcipher_walk_virt(&walk, req, false);
while ((nbytes = walk.nbytes)) {
- u8 *wsrc = walk.src.virt.addr;
+ const u8 *wsrc = walk.src.virt.addr;
u8 *wdst = walk.dst.virt.addr;
/* Process four block batch */

u8 *ctrblk = walk->iv;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
- u8 *src = walk->src.virt.addr;
+ const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;

crypto_cipher_alg(tfm)->cia_encrypt;
unsigned int bsize = crypto_cipher_blocksize(tfm);
u8 *ctrblk = walk->iv;
- u8 *src = walk->src.virt.addr;
+ const u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
unsigned int nbytes = walk->nbytes;

unsigned int bsize = crypto_cipher_blocksize(tfm);
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
+ u8 *dst = walk->dst.virt.addr;
u8 *ctrblk = walk->iv;
- u8 *src = walk->src.virt.addr;
u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
do {
/* create keystream */
fn(crypto_cipher_tfm(tfm), keystream, ctrblk);
- crypto_xor(src, keystream, bsize);
+ crypto_xor(dst, keystream, bsize);
/* increment counter in counterblock */
crypto_inc(ctrblk, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
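
The in-place CTR variant is the interesting case: with src const, the
loop may no longer write through it, so it iterates over dst instead.
For an in-place walk, dst and src alias the same buffer, so behaviour is
unchanged; the same rewrite recurs in the XCTR in-place loop further
down. A compilable userspace sketch, where gen_keystream and ctr_inc are
placeholders for the block-cipher call and crypto_inc():

#include <stdint.h>
#include <string.h>

typedef uint8_t u8;
#define BSIZE 16

/* Placeholder keystream generator; the kernel calls the block cipher. */
static void gen_keystream(u8 keystream[BSIZE], const u8 ctrblk[BSIZE])
{
	memcpy(keystream, ctrblk, BSIZE);
}

/* Big-endian increment of the counter block, as crypto_inc() does. */
static void ctr_inc(u8 *ctrblk, unsigned int size)
{
	for (unsigned int i = size; i-- > 0 && ++ctrblk[i] == 0;)
		;
}

/*
 * In-place CTR: the buffer is both input and output, so the loop reads
 * and writes through the single writable pointer (the kernel's dst).
 * As in the kernel loop, the caller guarantees nbytes >= BSIZE and any
 * sub-block tail is left for separate handling.
 */
static void ctr_crypt_inplace(u8 *data, unsigned int nbytes, u8 ctrblk[BSIZE])
{
	u8 keystream[BSIZE];

	do {
		gen_keystream(keystream, ctrblk);
		for (unsigned int i = 0; i < BSIZE; i++)
			data[i] ^= keystream[i];	/* crypto_xor() */
		ctr_inc(ctrblk, BSIZE);
		data += BSIZE;
	} while ((nbytes -= BSIZE) >= BSIZE);
}

int main(void)
{
	u8 data[2 * BSIZE] = { 0 };
	u8 ctr[BSIZE] = { 0 };

	ctr_crypt_inplace(data, sizeof(data), ctr);
	return 0;
}
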
while (w.nbytes) {
unsigned int avail = w.nbytes;
- be128 *wsrc;
+ const be128 *wsrc;
be128 *wdst;
wsrc = w.src.virt.addr;

struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
+ const u8 *src = walk->src.virt.addr;
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;

{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE];
do {
- memcpy(tmpbuf, src, bsize);
- crypto_xor(iv, src, bsize);
- crypto_cipher_encrypt_one(tfm, src, iv);
- crypto_xor_cpy(iv, tmpbuf, src, bsize);
+ memcpy(tmpbuf, dst, bsize);
+ crypto_xor(iv, dst, bsize);
+ crypto_cipher_encrypt_one(tfm, dst, iv);
+ crypto_xor_cpy(iv, tmpbuf, dst, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;

struct crypto_cipher *tfm)
{
int bsize = crypto_cipher_blocksize(tfm);
+ const u8 *src = walk->src.virt.addr;
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;

{
int bsize = crypto_cipher_blocksize(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *src = walk->src.virt.addr;
+ u8 *dst = walk->dst.virt.addr;
u8 * const iv = walk->iv;
u8 tmpbuf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(u32));
do {
- memcpy(tmpbuf, src, bsize);
- crypto_cipher_decrypt_one(tfm, src, src);
- crypto_xor(src, iv, bsize);
- crypto_xor_cpy(iv, src, tmpbuf, bsize);
+ memcpy(tmpbuf, dst, bsize);
+ crypto_cipher_decrypt_one(tfm, dst, dst);
+ crypto_xor(dst, iv, bsize);
+ crypto_xor_cpy(iv, dst, tmpbuf, bsize);
- src += bsize;
+ dst += bsize;
} while ((nbytes -= bsize) >= bsize);
return nbytes;
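
The PCBC in-place loops (encryption above, decryption here) follow the
same recipe: every access goes through dst. The pre-existing memcpy into
tmpbuf already preserved the old block, which PCBC needs because the next
IV combines the plaintext and the ciphertext of the same buffer. A
standalone sketch of in-place PCBC decryption, with decrypt_one standing
in for crypto_cipher_decrypt_one():

#include <stdint.h>
#include <string.h>

typedef uint8_t u8;
#define BSIZE 16

/* Placeholder block decryption; stands in for the real cipher. */
static void decrypt_one(u8 *out, const u8 *in)
{
	for (int i = 0; i < BSIZE; i++)
		out[i] = in[i] ^ 0x5C;
}

/*
 * In-place PCBC decryption. The buffer holds the ciphertext before the
 * cipher runs and the plaintext after, so the old contents must be
 * saved first: the next IV is plaintext ^ ciphertext.
 */
static void pcbc_decrypt_inplace(u8 *data, unsigned int nbytes, u8 *iv)
{
	u8 tmpbuf[BSIZE];

	do {
		memcpy(tmpbuf, data, BSIZE);	/* save ciphertext C */
		decrypt_one(data, data);	/* data = D(C) */
		for (int i = 0; i < BSIZE; i++)
			data[i] ^= iv[i];	/* data = P = D(C) ^ IV */
		for (int i = 0; i < BSIZE; i++)
			iv[i] = data[i] ^ tmpbuf[i];	/* IV' = P ^ C */
		data += BSIZE;
	} while ((nbytes -= BSIZE) >= BSIZE);
}

int main(void)
{
	u8 buf[BSIZE] = { 0 };
	u8 iv[BSIZE] = { 0 };

	pcbc_decrypt_inplace(buf, sizeof(buf), iv);
	return 0;
}
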
crypto_cipher_alg(tfm)->cia_encrypt;
unsigned long alignmask = crypto_cipher_alignmask(tfm);
unsigned int nbytes = walk->nbytes;
- u8 *data = walk->src.virt.addr;
+ u8 *data = walk->dst.virt.addr;
u8 tmp[XCTR_BLOCKSIZE + MAX_CIPHER_ALIGNMASK];
u8 *keystream = PTR_ALIGN(tmp + 0, alignmask + 1);
__le32 ctr32 = cpu_to_le32(byte_ctr / XCTR_BLOCKSIZE + 1);

while (w.nbytes) {
unsigned int avail = w.nbytes;
- le128 *wsrc;
+ const le128 *wsrc;
le128 *wdst;
wsrc = w.src.virt.addr;
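
LRW (the be128 hunk above) and XTS (this le128 one) walk the data as
128-bit words; the source words are only loaded, so the pointer type
tightens without any change to the loop bodies. A sketch using a plain
two-word struct; u128 and xor_tweaked are illustrative, not the kernel's
be128/le128 types:

#include <stdint.h>

/* Two 64-bit halves; byte order is irrelevant to the const point. */
typedef struct { uint64_t a, b; } u128;

/*
 * Tweaked XOR in the XTS/LRW style: source words are only loaded, so a
 * const pointer suffices; results go through the dst pointer.
 */
static void xor_tweaked(u128 *wdst, const u128 *wsrc, const u128 *tweak,
			unsigned int nblocks)
{
	for (unsigned int i = 0; i < nblocks; i++) {
		wdst[i].a = wsrc[i].a ^ tweak->a;
		wdst[i].b = wsrc[i].b ^ tweak->b;
	}
}

int main(void)
{
	u128 src[2] = { { 1, 2 }, { 3, 4 } };
	u128 dst[2];
	const u128 tweak = { 0xff, 0xff };

	xor_tweaked(dst, src, &tweak, 2);
	return dst[0].a == (1 ^ 0xffULL) ? 0 : 1;
}
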
err = skcipher_walk_virt(&walk, req, false);
while (walk.nbytes > 0) {
+ const u8 *src = walk.src.virt.addr;
u8 *dst = walk.dst.virt.addr;
- u8 *src = walk.src.virt.addr;
int nbytes = walk.nbytes;
int tail = 0;

/* Virtual address of the source. */
struct {
struct {
- void *const addr;
+ const void *const addr;
} virt;
} src;
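
The header change above is what drives the rest of the patch:
src.virt.addr is now a const void *const, an immutable pointer to data
the walk's users may not modify. A short sketch of what each const buys;
walk_src and sum_bytes are illustrative only:

#include <stdint.h>

/*
 * The two consts do different jobs:
 *   const void *      - users may not write through the pointer;
 *   ... *const addr   - holders of the struct may not reseat it.
 */
struct walk_src {
	struct {
		const void *const addr;
	} virt;
};

static unsigned int sum_bytes(const struct walk_src *src, unsigned int n)
{
	const uint8_t *p = src->virt.addr;	/* read-only access: OK */
	unsigned int sum = 0;

	for (unsigned int i = 0; i < n; i++)
		sum += p[i];
	/* p[0] = 0;           would not compile: data is const    */
	/* src->virt.addr = p; would not compile: pointer is const */
	return sum;
}

int main(void)
{
	static const uint8_t buf[3] = { 1, 2, 3 };
	struct walk_src s = { { buf } };

	return sum_bytes(&s, 3) == 6 ? 0 : 1;
}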