--- /dev/null
+From 030f4e968741d65aea9cd5f7814d1164967801ef Mon Sep 17 00:00:00 2001
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Tue, 7 Jul 2015 17:30:25 +0800
+Subject: crypto: nx - Fix reentrancy bugs
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 030f4e968741d65aea9cd5f7814d1164967801ef upstream.
+
+This patch fixes a host of reentrancy bugs in the nx driver. The
+following algorithms are affected:
+
+* CCM
+* GCM
+* CTR
+* XCBC
+* SHA256
+* SHA512
+
+The crypto API allows a single transform to be used by multiple
+threads simultaneously. For example, IPsec will use a single tfm
+to process packets for a given SA. As packets may arrive on
+multiple CPUs, that tfm must be reentrant.
+
+The nx driver does try to deal with this by using a spin lock.
+Unfortunately only the basic AES/CBC/ECB algorithms do this in
+the correct way.
+
+The symptoms of these bugs range from the generation of incorrect
+output to memory corruption.
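+
+The pattern used by the hunks below, shown as a stripped-down sketch
+(illustration only, not a hunk from this patch): per-request state such
+as the IV moves out of the shared nx_crypto_ctx into memory owned by
+the individual request, so two CPUs driving the same tfm no longer
+share the same buffer.
+
+	/* per-tfm data, still protected by the ctx spin lock */
+	struct nx_gcm_priv {
+		u8 iauth_tag[16];
+		u8 nonce[NX_GCM4106_NONCE_LEN];
+	};
+
+	/* per-request data, carved out of the aead_request itself */
+	struct nx_gcm_rctx {
+		u8 iv[16];
+	};
+
+	static int gcm_aes_nx_encrypt(struct aead_request *req)
+	{
+		struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+
+		memcpy(rctx->iv, req->iv, 12);	/* private to this request */
+		return gcm_aes_nx_crypt(req, 1);
+	}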
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/nx/nx-aes-ccm.c | 6 ++-
+ drivers/crypto/nx/nx-aes-ctr.c | 7 ++--
+ drivers/crypto/nx/nx-aes-gcm.c | 17 +++++----
+ drivers/crypto/nx/nx-aes-xcbc.c | 70 +++++++++++++++++++++++++---------------
+ drivers/crypto/nx/nx-sha256.c | 43 +++++++++++++-----------
+ drivers/crypto/nx/nx-sha512.c | 44 ++++++++++++++-----------
+ drivers/crypto/nx/nx.c | 2 +
+ drivers/crypto/nx/nx.h | 14 ++++++--
+ 8 files changed, 124 insertions(+), 79 deletions(-)
+
+--- a/drivers/crypto/nx/nx-aes-ccm.c
++++ b/drivers/crypto/nx/nx-aes-ccm.c
+@@ -494,8 +494,9 @@ out:
+ static int ccm4309_aes_nx_encrypt(struct aead_request *req)
+ {
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
++ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ struct blkcipher_desc desc;
+- u8 *iv = nx_ctx->priv.ccm.iv;
++ u8 *iv = rctx->iv;
+
+ iv[0] = 3;
+ memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+@@ -525,8 +526,9 @@ static int ccm_aes_nx_encrypt(struct aea
+ static int ccm4309_aes_nx_decrypt(struct aead_request *req)
+ {
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
++ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ struct blkcipher_desc desc;
+- u8 *iv = nx_ctx->priv.ccm.iv;
++ u8 *iv = rctx->iv;
+
+ iv[0] = 3;
+ memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3);
+--- a/drivers/crypto/nx/nx-aes-ctr.c
++++ b/drivers/crypto/nx/nx-aes-ctr.c
+@@ -72,7 +72,7 @@ static int ctr3686_aes_nx_set_key(struct
+ if (key_len < CTR_RFC3686_NONCE_SIZE)
+ return -EINVAL;
+
+- memcpy(nx_ctx->priv.ctr.iv,
++ memcpy(nx_ctx->priv.ctr.nonce,
+ in_key + key_len - CTR_RFC3686_NONCE_SIZE,
+ CTR_RFC3686_NONCE_SIZE);
+
+@@ -131,14 +131,15 @@ static int ctr3686_aes_nx_crypt(struct b
+ unsigned int nbytes)
+ {
+ struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm);
+- u8 *iv = nx_ctx->priv.ctr.iv;
++ u8 iv[16];
+
++ memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_IV_SIZE);
+ memcpy(iv + CTR_RFC3686_NONCE_SIZE,
+ desc->info, CTR_RFC3686_IV_SIZE);
+ iv[12] = iv[13] = iv[14] = 0;
+ iv[15] = 1;
+
+- desc->info = nx_ctx->priv.ctr.iv;
++ desc->info = iv;
+
+ return ctr_aes_nx_crypt(desc, dst, src, nbytes);
+ }
+--- a/drivers/crypto/nx/nx-aes-gcm.c
++++ b/drivers/crypto/nx/nx-aes-gcm.c
+@@ -330,6 +330,7 @@ out:
+ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
+ {
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
++ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct blkcipher_desc desc;
+ unsigned int nbytes = req->cryptlen;
+@@ -339,7 +340,7 @@ static int gcm_aes_nx_crypt(struct aead_
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
+- desc.info = nx_ctx->priv.gcm.iv;
++ desc.info = rctx->iv;
+ /* initialize the counter */
+ *(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;
+
+@@ -434,8 +435,8 @@ out:
+
+ static int gcm_aes_nx_encrypt(struct aead_request *req)
+ {
+- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+- char *iv = nx_ctx->priv.gcm.iv;
++ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
++ char *iv = rctx->iv;
+
+ memcpy(iv, req->iv, 12);
+
+@@ -444,8 +445,8 @@ static int gcm_aes_nx_encrypt(struct aea
+
+ static int gcm_aes_nx_decrypt(struct aead_request *req)
+ {
+- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+- char *iv = nx_ctx->priv.gcm.iv;
++ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
++ char *iv = rctx->iv;
+
+ memcpy(iv, req->iv, 12);
+
+@@ -455,7 +456,8 @@ static int gcm_aes_nx_decrypt(struct aea
+ static int gcm4106_aes_nx_encrypt(struct aead_request *req)
+ {
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+- char *iv = nx_ctx->priv.gcm.iv;
++ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
++ char *iv = rctx->iv;
+ char *nonce = nx_ctx->priv.gcm.nonce;
+
+ memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+@@ -467,7 +469,8 @@ static int gcm4106_aes_nx_encrypt(struct
+ static int gcm4106_aes_nx_decrypt(struct aead_request *req)
+ {
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
+- char *iv = nx_ctx->priv.gcm.iv;
++ struct nx_gcm_rctx *rctx = aead_request_ctx(req);
++ char *iv = rctx->iv;
+ char *nonce = nx_ctx->priv.gcm.nonce;
+
+ memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
+--- a/drivers/crypto/nx/nx-aes-xcbc.c
++++ b/drivers/crypto/nx/nx-aes-xcbc.c
+@@ -42,6 +42,7 @@ static int nx_xcbc_set_key(struct crypto
+ unsigned int key_len)
+ {
+ struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
++ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+
+ switch (key_len) {
+ case AES_KEYSIZE_128:
+@@ -51,7 +52,7 @@ static int nx_xcbc_set_key(struct crypto
+ return -EINVAL;
+ }
+
+- memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);
++ memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);
+
+ return 0;
+ }
+@@ -148,32 +149,29 @@ out:
+ return rc;
+ }
+
+-static int nx_xcbc_init(struct shash_desc *desc)
++static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
+ {
+- struct xcbc_state *sctx = shash_desc_ctx(desc);
+- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
++ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+- struct nx_sg *out_sg;
+- int len;
++ int err;
+
+- nx_ctx_init(nx_ctx, HCOP_FC_AES);
++ err = nx_crypto_ctx_aes_xcbc_init(tfm);
++ if (err)
++ return err;
+
+- memset(sctx, 0, sizeof *sctx);
++ nx_ctx_init(nx_ctx, HCOP_FC_AES);
+
+ NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
+ csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
+
+- memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
+- memset(nx_ctx->priv.xcbc.key, 0, sizeof *nx_ctx->priv.xcbc.key);
+-
+- len = AES_BLOCK_SIZE;
+- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+- &len, nx_ctx->ap->sglen);
++ return 0;
++}
+
+- if (len != AES_BLOCK_SIZE)
+- return -EINVAL;
++static int nx_xcbc_init(struct shash_desc *desc)
++{
++ struct xcbc_state *sctx = shash_desc_ctx(desc);
+
+- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++ memset(sctx, 0, sizeof *sctx);
+
+ return 0;
+ }
+@@ -186,6 +184,7 @@ static int nx_xcbc_update(struct shash_d
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
+ struct nx_sg *in_sg;
++ struct nx_sg *out_sg;
+ u32 to_process = 0, leftover, total;
+ unsigned int max_sg_len;
+ unsigned long irq_flags;
+@@ -213,6 +212,17 @@ static int nx_xcbc_update(struct shash_d
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
++ data_len = AES_BLOCK_SIZE;
++ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
++ &len, nx_ctx->ap->sglen);
++
++ if (data_len != AES_BLOCK_SIZE) {
++ rc = -EINVAL;
++ goto out;
++ }
++
++ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++
+ do {
+ to_process = total - to_process;
+ to_process = to_process & ~(AES_BLOCK_SIZE - 1);
+@@ -235,8 +245,10 @@ static int nx_xcbc_update(struct shash_d
+ (u8 *) sctx->buffer,
+ &data_len,
+ max_sg_len);
+- if (data_len != sctx->count)
+- return -EINVAL;
++ if (data_len != sctx->count) {
++ rc = -EINVAL;
++ goto out;
++ }
+ }
+
+ data_len = to_process - sctx->count;
+@@ -245,8 +257,10 @@ static int nx_xcbc_update(struct shash_d
+ &data_len,
+ max_sg_len);
+
+- if (data_len != to_process - sctx->count)
+- return -EINVAL;
++ if (data_len != to_process - sctx->count) {
++ rc = -EINVAL;
++ goto out;
++ }
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
+ sizeof(struct nx_sg);
+@@ -325,15 +339,19 @@ static int nx_xcbc_final(struct shash_de
+ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
+ &len, nx_ctx->ap->sglen);
+
+- if (len != sctx->count)
+- return -EINVAL;
++ if (len != sctx->count) {
++ rc = -EINVAL;
++ goto out;
++ }
+
+ len = AES_BLOCK_SIZE;
+ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
+ nx_ctx->ap->sglen);
+
+- if (len != AES_BLOCK_SIZE)
+- return -EINVAL;
++ if (len != AES_BLOCK_SIZE) {
++ rc = -EINVAL;
++ goto out;
++ }
+
+ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+@@ -372,7 +390,7 @@ struct shash_alg nx_shash_aes_xcbc_alg =
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+- .cra_init = nx_crypto_ctx_aes_xcbc_init,
++ .cra_init = nx_crypto_ctx_aes_xcbc_init2,
+ .cra_exit = nx_crypto_ctx_exit,
+ }
+ };
+--- a/drivers/crypto/nx/nx-sha256.c
++++ b/drivers/crypto/nx/nx-sha256.c
+@@ -29,34 +29,28 @@
+ #include "nx.h"
+
+
+-static int nx_sha256_init(struct shash_desc *desc)
++static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm)
+ {
+- struct sha256_state *sctx = shash_desc_ctx(desc);
+- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+- struct nx_sg *out_sg;
+- int len;
+- u32 max_sg_len;
++ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
++ int err;
+
+- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
++ err = nx_crypto_ctx_sha_init(tfm);
++ if (err)
++ return err;
+
+- memset(sctx, 0, sizeof *sctx);
++ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256];
+
+ NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
+
+- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+- max_sg_len = min_t(u64, max_sg_len,
+- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++ return 0;
++}
+
+- len = SHA256_DIGEST_SIZE;
+- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+- &len, max_sg_len);
+- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++static int nx_sha256_init(struct shash_desc *desc) {
++ struct sha256_state *sctx = shash_desc_ctx(desc);
+
+- if (len != SHA256_DIGEST_SIZE)
+- return -EINVAL;
++ memset(sctx, 0, sizeof *sctx);
+
+ sctx->state[0] = __cpu_to_be32(SHA256_H0);
+ sctx->state[1] = __cpu_to_be32(SHA256_H1);
+@@ -78,6 +72,7 @@ static int nx_sha256_update(struct shash
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg;
++ struct nx_sg *out_sg;
+ u64 to_process = 0, leftover, total;
+ unsigned long irq_flags;
+ int rc = 0;
+@@ -108,6 +103,16 @@ static int nx_sha256_update(struct shash
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
++ data_len = SHA256_DIGEST_SIZE;
++ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
++ &data_len, max_sg_len);
++ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++
++ if (data_len != SHA256_DIGEST_SIZE) {
++ rc = -EINVAL;
++ goto out;
++ }
++
+ do {
+ /*
+ * to_process: the SHA256_BLOCK_SIZE data chunk to process in
+@@ -282,7 +287,7 @@ struct shash_alg nx_shash_sha256_alg = {
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+- .cra_init = nx_crypto_ctx_sha_init,
++ .cra_init = nx_crypto_ctx_sha256_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ }
+ };
+--- a/drivers/crypto/nx/nx-sha512.c
++++ b/drivers/crypto/nx/nx-sha512.c
+@@ -28,34 +28,29 @@
+ #include "nx.h"
+
+
+-static int nx_sha512_init(struct shash_desc *desc)
++static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm)
+ {
+- struct sha512_state *sctx = shash_desc_ctx(desc);
+- struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+- struct nx_sg *out_sg;
+- int len;
+- u32 max_sg_len;
++ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
++ int err;
+
+- nx_ctx_init(nx_ctx, HCOP_FC_SHA);
++ err = nx_crypto_ctx_sha_init(tfm);
++ if (err)
++ return err;
+
+- memset(sctx, 0, sizeof *sctx);
++ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+ nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512];
+
+ NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
+
+- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+- max_sg_len = min_t(u64, max_sg_len,
+- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++ return 0;
++}
+
+- len = SHA512_DIGEST_SIZE;
+- out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
+- &len, max_sg_len);
+- nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++static int nx_sha512_init(struct shash_desc *desc)
++{
++ struct sha512_state *sctx = shash_desc_ctx(desc);
+
+- if (len != SHA512_DIGEST_SIZE)
+- return -EINVAL;
++ memset(sctx, 0, sizeof *sctx);
+
+ sctx->state[0] = __cpu_to_be64(SHA512_H0);
+ sctx->state[1] = __cpu_to_be64(SHA512_H1);
+@@ -77,6 +72,7 @@ static int nx_sha512_update(struct shash
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
+ struct nx_sg *in_sg;
++ struct nx_sg *out_sg;
+ u64 to_process, leftover = 0, total;
+ unsigned long irq_flags;
+ int rc = 0;
+@@ -107,6 +103,16 @@ static int nx_sha512_update(struct shash
+ max_sg_len = min_t(u64, max_sg_len,
+ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
++ data_len = SHA512_DIGEST_SIZE;
++ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
++ &data_len, max_sg_len);
++ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
++
++ if (data_len != SHA512_DIGEST_SIZE) {
++ rc = -EINVAL;
++ goto out;
++ }
++
+ do {
+ /*
+ * to_process: the SHA512_BLOCK_SIZE data chunk to process in
+@@ -288,7 +294,7 @@ struct shash_alg nx_shash_sha512_alg = {
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct nx_crypto_ctx),
+- .cra_init = nx_crypto_ctx_sha_init,
++ .cra_init = nx_crypto_ctx_sha512_init,
+ .cra_exit = nx_crypto_ctx_exit,
+ }
+ };
+--- a/drivers/crypto/nx/nx.c
++++ b/drivers/crypto/nx/nx.c
+@@ -635,12 +635,14 @@ static int nx_crypto_ctx_init(struct nx_
+ /* entry points from the crypto tfm initializers */
+ int nx_crypto_ctx_aes_ccm_init(struct crypto_tfm *tfm)
+ {
++ tfm->crt_aead.reqsize = sizeof(struct nx_ccm_rctx);
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_CCM);
+ }
+
+ int nx_crypto_ctx_aes_gcm_init(struct crypto_tfm *tfm)
+ {
++ tfm->crt_aead.reqsize = sizeof(struct nx_gcm_rctx);
+ return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES,
+ NX_MODE_AES_GCM);
+ }
+--- a/drivers/crypto/nx/nx.h
++++ b/drivers/crypto/nx/nx.h
+@@ -2,6 +2,8 @@
+ #ifndef __NX_H__
+ #define __NX_H__
+
++#include <crypto/ctr.h>
++
+ #define NX_NAME "nx-crypto"
+ #define NX_STRING "IBM Power7+ Nest Accelerator Crypto Driver"
+ #define NX_VERSION "1.0"
+@@ -91,8 +93,11 @@ struct nx_crypto_driver {
+
+ #define NX_GCM4106_NONCE_LEN (4)
+ #define NX_GCM_CTR_OFFSET (12)
+-struct nx_gcm_priv {
++struct nx_gcm_rctx {
+ u8 iv[16];
++};
++
++struct nx_gcm_priv {
+ u8 iauth_tag[16];
+ u8 nonce[NX_GCM4106_NONCE_LEN];
+ };
+@@ -100,8 +105,11 @@ struct nx_gcm_priv {
+ #define NX_CCM_AES_KEY_LEN (16)
+ #define NX_CCM4309_AES_KEY_LEN (19)
+ #define NX_CCM4309_NONCE_LEN (3)
+-struct nx_ccm_priv {
++struct nx_ccm_rctx {
+ u8 iv[16];
++};
++
++struct nx_ccm_priv {
+ u8 b0[16];
+ u8 iauth_tag[16];
+ u8 oauth_tag[16];
+@@ -113,7 +121,7 @@ struct nx_xcbc_priv {
+ };
+
+ struct nx_ctr_priv {
+- u8 iv[16];
++ u8 nonce[CTR_RFC3686_NONCE_SIZE];
+ };
+
+ struct nx_crypto_ctx {
--- /dev/null
+From c3365ce130e50176533debe1cabebcdb8e61156c Mon Sep 17 00:00:00 2001
+From: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
+Date: Thu, 23 Apr 2015 17:40:30 -0300
+Subject: crypto: nx - Fixing NX data alignment with nx_sg list
+
+From: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
+
+commit c3365ce130e50176533debe1cabebcdb8e61156c upstream.
+
+NX requires that the nx_sg_list passed to the co-processor always be a
+multiple of 16 in size. The trim function takes care of this and makes
+sure every nx_sg_list is a 16-multiple in size, but the data itself was
+not being taken into account when the list was cropped. This caused a
+mismatch between the size of the list and the size of the data,
+corrupting csbcpb fields and returning a -23 (H_ST_PARM) error or an
+invalid operation.
+
+This patch fixes the problem by recalculating how much data should be
+put back into the to_process variable, which keeps the size of the
+sg_list consistent with the size of the data.
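+
+A hedged numeric illustration of the idea (made-up numbers, not code
+from this patch): if the bounded scatter/gather list can only describe
+1000 of the 1024 bytes in a request, the amount actually submitted has
+to be rounded down to a block multiple and the rest handed back:
+
+	unsigned int nbytes   = 1024;		/* requested             */
+	unsigned int fits     = 1000;		/* what the list covers  */
+	unsigned int usable   = fits & ~(AES_BLOCK_SIZE - 1);	/* 992 */
+	unsigned int put_back = nbytes - usable;		/* 32  */
+
+	nbytes -= put_back;	/* co-processor now sees 992 bytes */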
+
+Signed-off-by: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/nx/nx.c | 28 ++++++++++++++++++++++++----
+ 1 file changed, 24 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/nx/nx.c
++++ b/drivers/crypto/nx/nx.c
+@@ -215,8 +215,15 @@ struct nx_sg *nx_walk_and_build(struct n
+ * @delta: is the amount we need to crop in order to bound the list.
+ *
+ */
+-static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta)
++static long int trim_sg_list(struct nx_sg *sg,
++ struct nx_sg *end,
++ unsigned int delta,
++ unsigned int *nbytes)
+ {
++ long int oplen;
++ long int data_back;
++ unsigned int is_delta = delta;
++
+ while (delta && end > sg) {
+ struct nx_sg *last = end - 1;
+
+@@ -228,7 +235,20 @@ static long int trim_sg_list(struct nx_s
+ delta -= last->len;
+ }
+ }
+- return (sg - end) * sizeof(struct nx_sg);
++
++ /* There are cases where we need to crop list in order to make it
++ * a block size multiple, but we also need to align data. In order to
++ * that we need to calculate how much we need to put back to be
++ * processed
++ */
++ oplen = (sg - end) * sizeof(struct nx_sg);
++ if (is_delta) {
++ data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len;
++ data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1));
++ *nbytes -= data_back;
++ }
++
++ return oplen;
+ }
+
+ /**
+@@ -330,8 +350,8 @@ int nx_build_sg_lists(struct nx_crypto_c
+ /* these lengths should be negative, which will indicate to phyp that
+ * the input and output parameters are scatterlists, not linear
+ * buffers */
+- nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta);
+- nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta);
++ nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes);
++ nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes);
+
+ return 0;
+ }
--- /dev/null
+From 10d87b730e1d9f1442cae6487bb3aef8632bed23 Mon Sep 17 00:00:00 2001
+From: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
+Date: Thu, 23 Apr 2015 17:41:43 -0300
+Subject: crypto: nx - Fixing SHA update bug
+
+From: Leonidas Da Silva Barbosa <leosilva@linux.vnet.ibm.com>
+
+commit 10d87b730e1d9f1442cae6487bb3aef8632bed23 upstream.
+
+The bug happens when a data size smaller than the SHA block size is
+passed. Since the data from the first update is saved in the buffer,
+the second update goes through a two-step calculation of op.inlen and
+op.outlen, and it is in this step that wrong values of op.inlen and
+op.outlen were being computed.
+
+This patch fixes the problem by eliminating nx_sha_build_sg_list, which
+is useless in the context of the SHA algorithms. Instead we call
+nx_build_sg_list directly and pass a previously calculated max_sg_len
+to it.
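+
+The shape of the replacement, as a sketch (the real hunks follow; the
+names are the driver's own): leftover bytes buffered by the previous
+update and the new caller data are chained into one NX scatter/gather
+list, bounded by a single precomputed max_sg_len, and op.inlen is
+derived from the resulting list:
+
+	max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+			   nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+	max_sg_len = min_t(u64, max_sg_len,
+			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+
+	data_len = buf_len;			/* bytes held in sctx->buf */
+	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
+				 &data_len, max_sg_len);
+
+	data_len = to_process - buf_len;	/* fresh data from the caller */
+	in_sg = nx_build_sg_list(in_sg, (u8 *) data, &data_len, max_sg_len);
+
+	/* negative length tells phyp these are scatterlists */
+	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);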
+
+Signed-off-by: Leonidas S. Barbosa <leosilva@linux.vnet.ibm.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/crypto/nx/nx-sha256.c | 84 ++++++++++++++++++++++++-----------------
+ drivers/crypto/nx/nx-sha512.c | 85 ++++++++++++++++++++++++------------------
+ drivers/crypto/nx/nx.c | 47 -----------------------
+ drivers/crypto/nx/nx.h | 2
+ 4 files changed, 99 insertions(+), 119 deletions(-)
+
+--- a/drivers/crypto/nx/nx-sha256.c
++++ b/drivers/crypto/nx/nx-sha256.c
+@@ -33,8 +33,9 @@ static int nx_sha256_init(struct shash_d
+ {
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
++ struct nx_sg *out_sg;
+ int len;
+- int rc;
++ u32 max_sg_len;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+@@ -44,15 +45,18 @@ static int nx_sha256_init(struct shash_d
+
+ NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256);
+
++ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++ max_sg_len = min_t(u64, max_sg_len,
++ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
+ len = SHA256_DIGEST_SIZE;
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+- &nx_ctx->op.outlen,
+- &len,
+- (u8 *) sctx->state,
+- NX_DS_SHA256);
++ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
++ &len, max_sg_len);
++ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+- if (rc)
+- goto out;
++ if (len != SHA256_DIGEST_SIZE)
++ return -EINVAL;
+
+ sctx->state[0] = __cpu_to_be32(SHA256_H0);
+ sctx->state[1] = __cpu_to_be32(SHA256_H1);
+@@ -64,7 +68,6 @@ static int nx_sha256_init(struct shash_d
+ sctx->state[7] = __cpu_to_be32(SHA256_H7);
+ sctx->count = 0;
+
+-out:
+ return 0;
+ }
+
+@@ -74,10 +77,12 @@ static int nx_sha256_update(struct shash
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
++ struct nx_sg *in_sg;
+ u64 to_process = 0, leftover, total;
+ unsigned long irq_flags;
+ int rc = 0;
+ int data_len;
++ u32 max_sg_len;
+ u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE);
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+@@ -97,6 +102,12 @@ static int nx_sha256_update(struct shash
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
++ in_sg = nx_ctx->in_sg;
++ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++ max_sg_len = min_t(u64, max_sg_len,
++ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
+ do {
+ /*
+ * to_process: the SHA256_BLOCK_SIZE data chunk to process in
+@@ -108,25 +119,22 @@ static int nx_sha256_update(struct shash
+
+ if (buf_len) {
+ data_len = buf_len;
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+- &nx_ctx->op.inlen,
+- &data_len,
+- (u8 *) sctx->buf,
+- NX_DS_SHA256);
++ in_sg = nx_build_sg_list(nx_ctx->in_sg,
++ (u8 *) sctx->buf,
++ &data_len,
++ max_sg_len);
+
+- if (rc || data_len != buf_len)
++ if (data_len != buf_len) {
++ rc = -EINVAL;
+ goto out;
++ }
+ }
+
+ data_len = to_process - buf_len;
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+- &nx_ctx->op.inlen,
+- &data_len,
+- (u8 *) data,
+- NX_DS_SHA256);
++ in_sg = nx_build_sg_list(in_sg, (u8 *) data,
++ &data_len, max_sg_len);
+
+- if (rc)
+- goto out;
++ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+
+ to_process = (data_len + buf_len);
+ leftover = total - to_process;
+@@ -173,12 +181,19 @@ static int nx_sha256_final(struct shash_
+ struct sha256_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
++ struct nx_sg *in_sg, *out_sg;
+ unsigned long irq_flags;
+- int rc;
++ u32 max_sg_len;
++ int rc = 0;
+ int len;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
++ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++ max_sg_len = min_t(u64, max_sg_len,
++ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ if (sctx->count >= SHA256_BLOCK_SIZE) {
+@@ -195,25 +210,24 @@ static int nx_sha256_final(struct shash_
+ csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8);
+
+ len = sctx->count & (SHA256_BLOCK_SIZE - 1);
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+- &nx_ctx->op.inlen,
+- &len,
+- (u8 *) sctx->buf,
+- NX_DS_SHA256);
++ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf,
++ &len, max_sg_len);
+
+- if (rc || len != (sctx->count & (SHA256_BLOCK_SIZE - 1)))
++ if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) {
++ rc = -EINVAL;
+ goto out;
++ }
+
+ len = SHA256_DIGEST_SIZE;
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+- &nx_ctx->op.outlen,
+- &len,
+- out,
+- NX_DS_SHA256);
++ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len);
+
+- if (rc || len != SHA256_DIGEST_SIZE)
++ if (len != SHA256_DIGEST_SIZE) {
++ rc = -EINVAL;
+ goto out;
++ }
+
++ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
++ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+ if (!nx_ctx->op.outlen) {
+ rc = -EINVAL;
+ goto out;
+--- a/drivers/crypto/nx/nx-sha512.c
++++ b/drivers/crypto/nx/nx-sha512.c
+@@ -32,8 +32,9 @@ static int nx_sha512_init(struct shash_d
+ {
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
++ struct nx_sg *out_sg;
+ int len;
+- int rc;
++ u32 max_sg_len;
+
+ nx_ctx_init(nx_ctx, HCOP_FC_SHA);
+
+@@ -43,15 +44,18 @@ static int nx_sha512_init(struct shash_d
+
+ NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512);
+
++ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++ max_sg_len = min_t(u64, max_sg_len,
++ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
+ len = SHA512_DIGEST_SIZE;
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+- &nx_ctx->op.outlen,
+- &len,
+- (u8 *)sctx->state,
+- NX_DS_SHA512);
++ out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
++ &len, max_sg_len);
++ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+- if (rc || len != SHA512_DIGEST_SIZE)
+- goto out;
++ if (len != SHA512_DIGEST_SIZE)
++ return -EINVAL;
+
+ sctx->state[0] = __cpu_to_be64(SHA512_H0);
+ sctx->state[1] = __cpu_to_be64(SHA512_H1);
+@@ -63,7 +67,6 @@ static int nx_sha512_init(struct shash_d
+ sctx->state[7] = __cpu_to_be64(SHA512_H7);
+ sctx->count[0] = 0;
+
+-out:
+ return 0;
+ }
+
+@@ -73,10 +76,12 @@ static int nx_sha512_update(struct shash
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
++ struct nx_sg *in_sg;
+ u64 to_process, leftover = 0, total;
+ unsigned long irq_flags;
+ int rc = 0;
+ int data_len;
++ u32 max_sg_len;
+ u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE);
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+@@ -96,6 +101,12 @@ static int nx_sha512_update(struct shash
+ NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
+ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
+
++ in_sg = nx_ctx->in_sg;
++ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++ max_sg_len = min_t(u64, max_sg_len,
++ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
+ do {
+ /*
+ * to_process: the SHA512_BLOCK_SIZE data chunk to process in
+@@ -108,25 +119,26 @@ static int nx_sha512_update(struct shash
+
+ if (buf_len) {
+ data_len = buf_len;
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+- &nx_ctx->op.inlen,
+- &data_len,
+- (u8 *) sctx->buf,
+- NX_DS_SHA512);
++ in_sg = nx_build_sg_list(nx_ctx->in_sg,
++ (u8 *) sctx->buf,
++ &data_len, max_sg_len);
+
+- if (rc || data_len != buf_len)
++ if (data_len != buf_len) {
++ rc = -EINVAL;
+ goto out;
++ }
+ }
+
+ data_len = to_process - buf_len;
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+- &nx_ctx->op.inlen,
+- &data_len,
+- (u8 *) data,
+- NX_DS_SHA512);
++ in_sg = nx_build_sg_list(in_sg, (u8 *) data,
++ &data_len, max_sg_len);
++
++ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
+
+- if (rc || data_len != (to_process - buf_len))
++ if (data_len != (to_process - buf_len)) {
++ rc = -EINVAL;
+ goto out;
++ }
+
+ to_process = (data_len + buf_len);
+ leftover = total - to_process;
+@@ -172,13 +184,20 @@ static int nx_sha512_final(struct shash_
+ struct sha512_state *sctx = shash_desc_ctx(desc);
+ struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
+ struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb;
++ struct nx_sg *in_sg, *out_sg;
++ u32 max_sg_len;
+ u64 count0;
+ unsigned long irq_flags;
+- int rc;
++ int rc = 0;
+ int len;
+
+ spin_lock_irqsave(&nx_ctx->lock, irq_flags);
+
++ max_sg_len = min_t(u64, nx_ctx->ap->sglen,
++ nx_driver.of.max_sg_len/sizeof(struct nx_sg));
++ max_sg_len = min_t(u64, max_sg_len,
++ nx_ctx->ap->databytelen/NX_PAGE_SIZE);
++
+ /* final is represented by continuing the operation and indicating that
+ * this is not an intermediate operation */
+ if (sctx->count[0] >= SHA512_BLOCK_SIZE) {
+@@ -200,24 +219,20 @@ static int nx_sha512_final(struct shash_
+ csbcpb->cpb.sha512.message_bit_length_lo = count0;
+
+ len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1);
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->in_sg,
+- &nx_ctx->op.inlen,
+- &len,
+- (u8 *)sctx->buf,
+- NX_DS_SHA512);
++ in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len,
++ max_sg_len);
+
+- if (rc || len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1)))
++ if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) {
++ rc = -EINVAL;
+ goto out;
++ }
+
+ len = SHA512_DIGEST_SIZE;
+- rc = nx_sha_build_sg_list(nx_ctx, nx_ctx->out_sg,
+- &nx_ctx->op.outlen,
+- &len,
+- out,
+- NX_DS_SHA512);
++ out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
++ max_sg_len);
+
+- if (rc)
+- goto out;
++ nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
++ nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
+
+ if (!nx_ctx->op.outlen) {
+ rc = -EINVAL;
+--- a/drivers/crypto/nx/nx.c
++++ b/drivers/crypto/nx/nx.c
+@@ -252,53 +252,6 @@ static long int trim_sg_list(struct nx_s
+ }
+
+ /**
+- * nx_sha_build_sg_list - walk and build sg list to sha modes
+- * using right bounds and limits.
+- * @nx_ctx: NX crypto context for the lists we're building
+- * @nx_sg: current sg list in or out list
+- * @op_len: current op_len to be used in order to build a sg list
+- * @nbytes: number or bytes to be processed
+- * @offset: buf offset
+- * @mode: SHA256 or SHA512
+- */
+-int nx_sha_build_sg_list(struct nx_crypto_ctx *nx_ctx,
+- struct nx_sg *nx_in_outsg,
+- s64 *op_len,
+- unsigned int *nbytes,
+- u8 *offset,
+- u32 mode)
+-{
+- unsigned int delta = 0;
+- unsigned int total = *nbytes;
+- struct nx_sg *nx_insg = nx_in_outsg;
+- unsigned int max_sg_len;
+-
+- max_sg_len = min_t(u64, nx_ctx->ap->sglen,
+- nx_driver.of.max_sg_len/sizeof(struct nx_sg));
+- max_sg_len = min_t(u64, max_sg_len,
+- nx_ctx->ap->databytelen/NX_PAGE_SIZE);
+-
+- *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen);
+- nx_insg = nx_build_sg_list(nx_insg, offset, nbytes, max_sg_len);
+-
+- switch (mode) {
+- case NX_DS_SHA256:
+- if (*nbytes < total)
+- delta = *nbytes - (*nbytes & ~(SHA256_BLOCK_SIZE - 1));
+- break;
+- case NX_DS_SHA512:
+- if (*nbytes < total)
+- delta = *nbytes - (*nbytes & ~(SHA512_BLOCK_SIZE - 1));
+- break;
+- default:
+- return -EINVAL;
+- }
+- *op_len = trim_sg_list(nx_in_outsg, nx_insg, delta);
+-
+- return 0;
+-}
+-
+-/**
+ * nx_build_sg_lists - walk the input scatterlists and build arrays of NX
+ * scatterlists based on them.
+ *
+--- a/drivers/crypto/nx/nx.h
++++ b/drivers/crypto/nx/nx.h
+@@ -153,8 +153,6 @@ void nx_crypto_ctx_exit(struct crypto_tf
+ void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function);
+ int nx_hcall_sync(struct nx_crypto_ctx *ctx, struct vio_pfo_op *op,
+ u32 may_sleep);
+-int nx_sha_build_sg_list(struct nx_crypto_ctx *, struct nx_sg *,
+- s64 *, unsigned int *, u8 *, u32);
+ struct nx_sg *nx_build_sg_list(struct nx_sg *, u8 *, unsigned int *, u32);
+ int nx_build_sg_lists(struct nx_crypto_ctx *, struct blkcipher_desc *,
+ struct scatterlist *, struct scatterlist *, unsigned int *,
--- /dev/null
+From 1c8a38b1268aebc1a903b21b11575077e02d2cf7 Mon Sep 17 00:00:00 2001
+From: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+Date: Tue, 30 Jun 2015 14:36:57 +0200
+Subject: dmaengine: at_xdmac: fix transfer data width in at_xdmac_prep_slave_sg()
+
+From: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+
+commit 1c8a38b1268aebc1a903b21b11575077e02d2cf7 upstream.
+
+This patch adds the missing update of the transfer data width in
+at_xdmac_prep_slave_sg().
+
+Indeed, for each item in the scatter-gather list, we check whether the
+transfer length is aligned with the data width provided by
+dmaengine_slave_config(). If so, we directly use this data width for the
+current part of the transfer we are preparing. Otherwise, the data width
+is reduced to 8 bits (1 byte). Of course, the actual number of register
+accesses must also be updated to match the new data width.
+
+So one chunk was missing in the original patch (see Fixes tag below): the
+number of register accesses was correctly set to (len >> fixed_dwidth) in
+mbr_ubc but the real data width was not updated in mbr_cfg. Since mbr_cfg
+may change for each part of the scatter-gather transfer, this also explains
+why the original patch used the Descriptor View 2 instead of the
+Descriptor View 1.
+
+Let's take the example of a DMA transfer to write 8bit data into an Atmel
+USART with FIFOs. When FIFOs are enabled in the USART, its Transmit
+Holding Register (THR) works in multidata mode, that is to say that up to
+4 8bit data can be written into the THR in a single 32bit access, and it
+is still possible to write a single data item with an 8bit access. To take
+advantage of this new feature, the DMA driver was modified to allow
+multiple dwidths when doing slave transfers.
+For instance, when the total length is 22 bytes, the USART driver splits
+the transfer into 2 parts:
+
+First part: 20 bytes transferred through 5 32bit writes into THR
+Second part: 2 bytes transferred through 2 8bit writes into THR
+
+For the second part, the data width was first set to 4_BYTES by the USART
+driver thanks to dmaengine_slave_config() then at_xdmac_prep_slave_sg()
+reduces this data width to 1_BYTE because the 2 byte length is not aligned
+with the original 4_BYTES data width. Since the data width is modified,
+the actual number of writes into THR must be set accordingly.
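+
+For the 2 byte tail in the example above, the computation ends up
+looking roughly like this (sketch only, using the constants from
+at_xdmac.c):
+
+	/* configured: 4-byte accesses (dwidth code 2), remaining len = 2 */
+	fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)	/* 2 is not 4-aligned */
+		     ? dwidth
+		     : AT_XDMAC_CC_DWIDTH_BYTE;		/* fall back to 1 byte */
+
+	/* microblock length: len >> fixed_dwidth = 2 >> 0 = 2 accesses,
+	 * and mbr_cfg must now advertise the 1-byte width as well */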
+
+Signed-off-by: Cyrille Pitchen <cyrille.pitchen@atmel.com>
+Fixes: 6d3a7d9e3ada ("dmaengine: at_xdmac: allow muliple dwidths when doing slave transfers")
+Cc: stable@vger.kernel.org #4.0 and later
+Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
+Acked-by: Ludovic Desroches <ludovic.desroches@atmel.com>
+Signed-off-by: Vinod Koul <vinod.koul@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/at_xdmac.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/dma/at_xdmac.c
++++ b/drivers/dma/at_xdmac.c
+@@ -648,16 +648,17 @@ at_xdmac_prep_slave_sg(struct dma_chan *
+ desc->lld.mbr_sa = mem;
+ desc->lld.mbr_da = atchan->sconfig.dst_addr;
+ }
+- desc->lld.mbr_cfg = atchan->cfg;
+- dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
++ dwidth = at_xdmac_get_dwidth(atchan->cfg);
+ fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
+- ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
++ ? dwidth
+ : AT_XDMAC_CC_DWIDTH_BYTE;
+ desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
+ | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
+ | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
+ | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */
+ | (len >> fixed_dwidth); /* microblock length */
++ desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
++ AT_XDMAC_CC_DWIDTH(fixed_dwidth);
+ dev_dbg(chan2dev(chan),
+ "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+ __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
x86-nmi-64-improve-nested-nmi-comments.patch
x86-nmi-64-reorder-nested-nmi-checks.patch
x86-nmi-64-use-df-to-avoid-userspace-rsp-confusing-nested-nmi-detection.patch
+dmaengine-at_xdmac-fix-transfer-data-width-in-at_xdmac_prep_slave_sg.patch
+crypto-nx-fixing-nx-data-alignment-with-nx_sg-list.patch
+crypto-nx-fixing-sha-update-bug.patch
+crypto-nx-fix-reentrancy-bugs.patch