--- /dev/null
+From 0328ac267564089d9cedfb568f936d30a6debd21 Mon Sep 17 00:00:00 2001
+From: Uri Simchoni <uri@jdland.co.il>
+Date: Thu, 8 Apr 2010 19:25:37 +0300
+Subject: [PATCH] crypto: mv_cesa - Invoke the user callback from a softirq
+ context
+
+Invoke the user callback from a softirq context
+
+Signed-off-by: Uri Simchoni <uri@jdland.co.il>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+ drivers/crypto/mv_cesa.c | 2 ++
+ 1 files changed, 2 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index b21ef63..3e60ba9 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -275,7 +275,9 @@ static void dequeue_complete_req(void)
+ sg_miter_stop(&cpg->p.dst_sg_it);
+ mv_crypto_algo_completion();
+ cpg->eng_st = ENGINE_IDLE;
++ local_bh_disable();
+ req->base.complete(&req->base, 0);
++ local_bh_enable();
+ }
+ }
+
+--
+1.7.6.5
+From 6bc6fcd609080461682c5cc0a1e3bf4345d6419d Mon Sep 17 00:00:00 2001
+From: Uri Simchoni <uri@jdland.co.il>
+Date: Thu, 8 Apr 2010 19:25:56 +0300
+Subject: [PATCH] crypto: mv_cesa - Remove compiler warning in mv_cesa driver
+
+Remove compiler warning
+
+Signed-off-by: Uri Simchoni <uri@jdland.co.il>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+ drivers/crypto/mv_cesa.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 3e60ba9..37d9f06 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -178,6 +178,7 @@ static void mv_process_current_q(int first_block)
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_ECB;
+ break;
+ case COP_AES_CBC:
++ default:
+ op.config = CFG_OP_CRYPT_ONLY | CFG_ENCM_AES | CFG_ENC_MODE_CBC;
+ op.enc_iv = ENC_IV_POINT(SRAM_DATA_IV) |
+ ENC_IV_BUF_POINT(SRAM_DATA_IV_BUF);
+--
+1.7.6.5
+X-Git-Url: http://git.kernelconcepts.de/?p=mv-sheeva.git;a=blobdiff_plain;f=drivers%2Fcrypto%2Fmv_cesa.c;h=018a95ce0c9b0d6e6a13bff1522630799bc445b3;hp=37d9f0688e7575a3e366f6bb9eda5adc5db807b5;hb=f565e67ec1b8f4a95d21550f9b879fe86b4132e0;hpb=6bc6fcd609080461682c5cc0a1e3bf4345d6419d
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 37d9f06..018a95c 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -242,6 +242,8 @@ static void dequeue_complete_req(void)
+ struct ablkcipher_request *req = cpg->cur_req;
+ void *buf;
+ int ret;
++ int need_copy_len = cpg->p.crypt_len;
++ int sram_offset = 0;
+
+ cpg->p.total_req_bytes += cpg->p.crypt_len;
+ do {
+@@ -257,14 +259,16 @@ static void dequeue_complete_req(void)
+ buf = cpg->p.dst_sg_it.addr;
+ buf += cpg->p.dst_start;
+
+- dst_copy = min(cpg->p.crypt_len, cpg->p.sg_dst_left);
+-
+- memcpy(buf, cpg->sram + SRAM_DATA_OUT_START, dst_copy);
++ dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
+
++ memcpy(buf,
++ cpg->sram + SRAM_DATA_OUT_START + sram_offset,
++ dst_copy);
++ sram_offset += dst_copy;
+ cpg->p.sg_dst_left -= dst_copy;
+- cpg->p.crypt_len -= dst_copy;
++ need_copy_len -= dst_copy;
+ cpg->p.dst_start += dst_copy;
+- } while (cpg->p.crypt_len > 0);
++ } while (need_copy_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ if (cpg->p.total_req_bytes < req->nbytes) {
+From 15d4dd3594221f11a7730fcf2d5f9942b96cdd7e Mon Sep 17 00:00:00 2001
+From: Uri Simchoni <uri@jdland.co.il>
+Date: Thu, 8 Apr 2010 19:27:02 +0300
+Subject: [PATCH] crypto: mv_cesa - Fix situations where the src sglist spans
+ more data than the request asks for
+
+Fix for situations where the source scatterlist spans more data than the
+request nbytes
+
+Signed-off-by: Uri Simchoni <uri@jdland.co.il>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+ drivers/crypto/mv_cesa.c | 66 ++++++++++++++++++++++++++++++---------------
+ 1 files changed, 44 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 018a95c..096f9ff 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -143,27 +143,45 @@ static int mv_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+ return 0;
+ }
+
+-static void setup_data_in(struct ablkcipher_request *req)
++static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
+ {
+ int ret;
+- void *buf;
+-
+- if (!cpg->p.sg_src_left) {
+- ret = sg_miter_next(&cpg->p.src_sg_it);
+- BUG_ON(!ret);
+- cpg->p.sg_src_left = cpg->p.src_sg_it.length;
+- cpg->p.src_start = 0;
+- }
++ void *sbuf;
++ int copied = 0;
+
+- cpg->p.crypt_len = min(cpg->p.sg_src_left, cpg->max_req_size);
+-
+- buf = cpg->p.src_sg_it.addr;
+- buf += cpg->p.src_start;
++ while (1) {
++ if (!p->sg_src_left) {
++ ret = sg_miter_next(&p->src_sg_it);
++ BUG_ON(!ret);
++ p->sg_src_left = p->src_sg_it.length;
++ p->src_start = 0;
++ }
+
+- memcpy(cpg->sram + SRAM_DATA_IN_START, buf, cpg->p.crypt_len);
++ sbuf = p->src_sg_it.addr + p->src_start;
++
++ if (p->sg_src_left <= len - copied) {
++ memcpy(dbuf + copied, sbuf, p->sg_src_left);
++ copied += p->sg_src_left;
++ p->sg_src_left = 0;
++ if (copied >= len)
++ break;
++ } else {
++ int copy_len = len - copied;
++ memcpy(dbuf + copied, sbuf, copy_len);
++ p->src_start += copy_len;
++ p->sg_src_left -= copy_len;
++ break;
++ }
++ }
++}
+
+- cpg->p.sg_src_left -= cpg->p.crypt_len;
+- cpg->p.src_start += cpg->p.crypt_len;
++static void setup_data_in(struct ablkcipher_request *req)
++{
++ struct req_progress *p = &cpg->p;
++ p->crypt_len =
++ min((int)req->nbytes - p->total_req_bytes, cpg->max_req_size);
++ copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
++ p->crypt_len);
+ }
+
+ static void mv_process_current_q(int first_block)
+@@ -289,12 +307,16 @@ static void dequeue_complete_req(void)
+ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+ {
+ int i = 0;
+-
+- do {
+- total_bytes -= sl[i].length;
+- i++;
+-
+- } while (total_bytes > 0);
++ size_t cur_len;
++
++ while (1) {
++ cur_len = sl[i].length;
++ ++i;
++ if (total_bytes > cur_len)
++ total_bytes -= cur_len;
++ else
++ break;
++ }
+
+ return i;
+ }
+--
+1.7.6.5
+From 3b61a90502481045f56c1c41a2af35ee48ca8b80 Mon Sep 17 00:00:00 2001
+From: Uri Simchoni <uri@jdland.co.il>
+Date: Thu, 8 Apr 2010 19:27:33 +0300
+Subject: [PATCH] crypto: mv_cesa - Enqueue generic async requests
+
+Enqueue generic async requests rather than ablkcipher requests
+in the driver's queue
+
+Signed-off-by: Uri Simchoni <uri@jdland.co.il>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+ drivers/crypto/mv_cesa.c | 43 ++++++++++++++++++++++++-------------------
+ 1 files changed, 24 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 096f9ff..8891e2e 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -39,6 +39,7 @@ enum engine_status {
+ * @sg_src_left: bytes left in src to process (scatter list)
+ * @src_start: offset to add to src start position (scatter list)
+ * @crypt_len: length of current crypt process
++ * @hw_nbytes: total bytes to process in hw for this request
+ * @sg_dst_left: bytes left dst to process in this scatter list
+ * @dst_start: offset to add to dst start position (scatter list)
+ * @total_req_bytes: total number of bytes processed (request).
+@@ -55,6 +56,7 @@ struct req_progress {
+ int sg_src_left;
+ int src_start;
+ int crypt_len;
++ int hw_nbytes;
+ /* dst mostly */
+ int sg_dst_left;
+ int dst_start;
+@@ -71,7 +73,7 @@ struct crypto_priv {
+ spinlock_t lock;
+ struct crypto_queue queue;
+ enum engine_status eng_st;
+- struct ablkcipher_request *cur_req;
++ struct crypto_async_request *cur_req;
+ struct req_progress p;
+ int max_req_size;
+ int sram_size;
+@@ -175,18 +177,18 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
+ }
+ }
+
+-static void setup_data_in(struct ablkcipher_request *req)
++static void setup_data_in(void)
+ {
+ struct req_progress *p = &cpg->p;
+ p->crypt_len =
+- min((int)req->nbytes - p->total_req_bytes, cpg->max_req_size);
++ min(p->hw_nbytes - p->total_req_bytes, cpg->max_req_size);
+ copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
+ p->crypt_len);
+ }
+
+ static void mv_process_current_q(int first_block)
+ {
+- struct ablkcipher_request *req = cpg->cur_req;
++ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
+ struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct sec_accel_config op;
+@@ -229,7 +231,7 @@ static void mv_process_current_q(int first_block)
+ ENC_P_DST(SRAM_DATA_OUT_START);
+ op.enc_key_p = SRAM_DATA_KEY_P;
+
+- setup_data_in(req);
++ setup_data_in();
+ op.enc_len = cpg->p.crypt_len;
+ memcpy(cpg->sram + SRAM_CONFIG, &op,
+ sizeof(struct sec_accel_config));
+@@ -246,7 +248,7 @@ static void mv_process_current_q(int first_block)
+
+ static void mv_crypto_algo_completion(void)
+ {
+- struct ablkcipher_request *req = cpg->cur_req;
++ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
+ if (req_ctx->op != COP_AES_CBC)
+@@ -257,7 +259,7 @@ static void mv_crypto_algo_completion(void)
+
+ static void dequeue_complete_req(void)
+ {
+- struct ablkcipher_request *req = cpg->cur_req;
++ struct crypto_async_request *req = cpg->cur_req;
+ void *buf;
+ int ret;
+ int need_copy_len = cpg->p.crypt_len;
+@@ -289,7 +291,7 @@ static void dequeue_complete_req(void)
+ } while (need_copy_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+- if (cpg->p.total_req_bytes < req->nbytes) {
++ if (cpg->p.total_req_bytes < cpg->p.hw_nbytes) {
+ /* process next scatter list entry */
+ cpg->eng_st = ENGINE_BUSY;
+ mv_process_current_q(0);
+@@ -299,7 +301,7 @@ static void dequeue_complete_req(void)
+ mv_crypto_algo_completion();
+ cpg->eng_st = ENGINE_IDLE;
+ local_bh_disable();
+- req->base.complete(&req->base, 0);
++ req->complete(req, 0);
+ local_bh_enable();
+ }
+ }
+@@ -323,16 +325,19 @@ static int count_sgs(struct scatterlist *sl, unsigned int total_bytes)
+
+ static void mv_enqueue_new_req(struct ablkcipher_request *req)
+ {
++ struct req_progress *p = &cpg->p;
+ int num_sgs;
+
+- cpg->cur_req = req;
+- memset(&cpg->p, 0, sizeof(struct req_progress));
++ cpg->cur_req = &req->base;
++ memset(p, 0, sizeof(struct req_progress));
++ p->hw_nbytes = req->nbytes;
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+- sg_miter_start(&cpg->p.src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
++ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+
+ num_sgs = count_sgs(req->dst, req->nbytes);
+- sg_miter_start(&cpg->p.dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
++ sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG);
++
+ mv_process_current_q(1);
+ }
+
+@@ -378,13 +383,13 @@ static int queue_manag(void *data)
+ return 0;
+ }
+
+-static int mv_handle_req(struct ablkcipher_request *req)
++static int mv_handle_req(struct crypto_async_request *req)
+ {
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&cpg->lock, flags);
+- ret = ablkcipher_enqueue_request(&cpg->queue, req);
++ ret = crypto_enqueue_request(&cpg->queue, req);
+ spin_unlock_irqrestore(&cpg->lock, flags);
+ wake_up_process(cpg->queue_th);
+ return ret;
+@@ -397,7 +402,7 @@ static int mv_enc_aes_ecb(struct ablkcipher_request *req)
+ req_ctx->op = COP_AES_ECB;
+ req_ctx->decrypt = 0;
+
+- return mv_handle_req(req);
++ return mv_handle_req(&req->base);
+ }
+
+ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+@@ -409,7 +414,7 @@ static int mv_dec_aes_ecb(struct ablkcipher_request *req)
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+- return mv_handle_req(req);
++ return mv_handle_req(&req->base);
+ }
+
+ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+@@ -419,7 +424,7 @@ static int mv_enc_aes_cbc(struct ablkcipher_request *req)
+ req_ctx->op = COP_AES_CBC;
+ req_ctx->decrypt = 0;
+
+- return mv_handle_req(req);
++ return mv_handle_req(&req->base);
+ }
+
+ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+@@ -431,7 +436,7 @@ static int mv_dec_aes_cbc(struct ablkcipher_request *req)
+ req_ctx->decrypt = 1;
+
+ compute_aes_dec_key(ctx);
+- return mv_handle_req(req);
++ return mv_handle_req(&req->base);
+ }
+
+ static int mv_cra_init(struct crypto_tfm *tfm)
+--
+1.7.6.5
+From 7a5f691ef03f4c01d2703b5ec4ddd4c17e645dec Mon Sep 17 00:00:00 2001
+From: Uri Simchoni <uri@jdland.co.il>
+Date: Thu, 8 Apr 2010 19:29:16 +0300
+Subject: [PATCH] crypto: mv_cesa - Rename a variable to a more suitable name
+
+Rename a variable to a more suitable name
+
+Signed-off-by: Uri Simchoni <uri@jdland.co.il>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+ drivers/crypto/mv_cesa.c | 10 +++++-----
+ 1 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 8891e2e..4262932 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -42,7 +42,7 @@ enum engine_status {
+ * @hw_nbytes: total bytes to process in hw for this request
+ * @sg_dst_left: bytes left dst to process in this scatter list
+ * @dst_start: offset to add to dst start position (scatter list)
+- * @total_req_bytes: total number of bytes processed (request).
++ * @hw_processed_bytes: number of bytes processed by hw (request).
+ *
+ * sg helper are used to iterate over the scatterlist. Since the size of the
+ * SRAM may be less than the scatter size, this struct struct is used to keep
+@@ -60,7 +60,7 @@ struct req_progress {
+ /* dst mostly */
+ int sg_dst_left;
+ int dst_start;
+- int total_req_bytes;
++ int hw_processed_bytes;
+ };
+
+ struct crypto_priv {
+@@ -181,7 +181,7 @@ static void setup_data_in(void)
+ {
+ struct req_progress *p = &cpg->p;
+ p->crypt_len =
+- min(p->hw_nbytes - p->total_req_bytes, cpg->max_req_size);
++ min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+ copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
+ p->crypt_len);
+ }
+@@ -265,7 +265,7 @@ static void dequeue_complete_req(void)
+ int need_copy_len = cpg->p.crypt_len;
+ int sram_offset = 0;
+
+- cpg->p.total_req_bytes += cpg->p.crypt_len;
++ cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+ do {
+ int dst_copy;
+
+@@ -291,7 +291,7 @@ static void dequeue_complete_req(void)
+ } while (need_copy_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+- if (cpg->p.total_req_bytes < cpg->p.hw_nbytes) {
++ if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
+ /* process next scatter list entry */
+ cpg->eng_st = ENGINE_BUSY;
+ mv_process_current_q(0);
+--
+1.6.5.GIT
+From a58094ac5f95d6969e5c52ff096d2fd2864542af Mon Sep 17 00:00:00 2001
+From: Uri Simchoni <uri@jdland.co.il>
+Date: Thu, 8 Apr 2010 19:30:19 +0300
+Subject: [PATCH] crypto: mv_cesa - Execute some code via function pointers
+ rather than direct calls
+
+Execute some code via function pointers rather than direct calls
+(to allow customization in the hashing request)
+
+Signed-off-by: Uri Simchoni <uri@jdland.co.il>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+ drivers/crypto/mv_cesa.c | 13 +++++++++----
+ 1 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 4262932..2b4f07a 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -51,6 +51,8 @@ enum engine_status {
+ struct req_progress {
+ struct sg_mapping_iter src_sg_it;
+ struct sg_mapping_iter dst_sg_it;
++ void (*complete) (void);
++ void (*process) (int is_first);
+
+ /* src mostly */
+ int sg_src_left;
+@@ -251,6 +253,9 @@ static void mv_crypto_algo_completion(void)
+ struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req);
+ struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req);
+
++ sg_miter_stop(&cpg->p.src_sg_it);
++ sg_miter_stop(&cpg->p.dst_sg_it);
++
+ if (req_ctx->op != COP_AES_CBC)
+ return ;
+
+@@ -294,11 +299,9 @@ static void dequeue_complete_req(void)
+ if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
+ /* process next scatter list entry */
+ cpg->eng_st = ENGINE_BUSY;
+- mv_process_current_q(0);
++ cpg->p.process(0);
+ } else {
+- sg_miter_stop(&cpg->p.src_sg_it);
+- sg_miter_stop(&cpg->p.dst_sg_it);
+- mv_crypto_algo_completion();
++ cpg->p.complete();
+ cpg->eng_st = ENGINE_IDLE;
+ local_bh_disable();
+ req->complete(req, 0);
+@@ -331,6 +334,8 @@ static void mv_enqueue_new_req(struct ablkcipher_request *req)
+ cpg->cur_req = &req->base;
+ memset(p, 0, sizeof(struct req_progress));
+ p->hw_nbytes = req->nbytes;
++ p->complete = mv_crypto_algo_completion;
++ p->process = mv_process_current_q;
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+--
+1.7.6.5
+X-Git-Url: http://git.kernelconcepts.de/?p=mv-sheeva.git;a=blobdiff_plain;f=drivers%2Fcrypto%2Fmv_cesa.c;h=49a22060fb51a46afff004ddade3dacf378aecd8;hp=2b4f07aa89e8ba7422de65eea027e06daa5f7797;hb=f0d03deaad05d9cc99cd2ee0475c9ecd726c19ae;hpb=a58094ac5f95d6969e5c52ff096d2fd2864542af
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 2b4f07a..49a2206 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -40,6 +40,7 @@ enum engine_status {
+ * @src_start: offset to add to src start position (scatter list)
+ * @crypt_len: length of current crypt process
+ * @hw_nbytes: total bytes to process in hw for this request
++ * @copy_back: whether to copy data back (crypt) or not (hash)
+ * @sg_dst_left: bytes left dst to process in this scatter list
+ * @dst_start: offset to add to dst start position (scatter list)
+ * @hw_processed_bytes: number of bytes processed by hw (request).
+@@ -60,6 +61,7 @@ struct req_progress {
+ int crypt_len;
+ int hw_nbytes;
+ /* dst mostly */
++ int copy_back;
+ int sg_dst_left;
+ int dst_start;
+ int hw_processed_bytes;
+@@ -267,33 +269,35 @@ static void dequeue_complete_req(void)
+ struct crypto_async_request *req = cpg->cur_req;
+ void *buf;
+ int ret;
+- int need_copy_len = cpg->p.crypt_len;
+- int sram_offset = 0;
+-
+ cpg->p.hw_processed_bytes += cpg->p.crypt_len;
+- do {
+- int dst_copy;
++ if (cpg->p.copy_back) {
++ int need_copy_len = cpg->p.crypt_len;
++ int sram_offset = 0;
++ do {
++ int dst_copy;
++
++ if (!cpg->p.sg_dst_left) {
++ ret = sg_miter_next(&cpg->p.dst_sg_it);
++ BUG_ON(!ret);
++ cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
++ cpg->p.dst_start = 0;
++ }
+
+- if (!cpg->p.sg_dst_left) {
+- ret = sg_miter_next(&cpg->p.dst_sg_it);
+- BUG_ON(!ret);
+- cpg->p.sg_dst_left = cpg->p.dst_sg_it.length;
+- cpg->p.dst_start = 0;
+- }
++ buf = cpg->p.dst_sg_it.addr;
++ buf += cpg->p.dst_start;
+
+- buf = cpg->p.dst_sg_it.addr;
+- buf += cpg->p.dst_start;
++ dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
+
+- dst_copy = min(need_copy_len, cpg->p.sg_dst_left);
++ memcpy(buf,
++ cpg->sram + SRAM_DATA_OUT_START + sram_offset,
++ dst_copy);
++ sram_offset += dst_copy;
++ cpg->p.sg_dst_left -= dst_copy;
++ need_copy_len -= dst_copy;
++ cpg->p.dst_start += dst_copy;
++ } while (need_copy_len > 0);
++ }
+
+- memcpy(buf,
+- cpg->sram + SRAM_DATA_OUT_START + sram_offset,
+- dst_copy);
+- sram_offset += dst_copy;
+- cpg->p.sg_dst_left -= dst_copy;
+- need_copy_len -= dst_copy;
+- cpg->p.dst_start += dst_copy;
+- } while (need_copy_len > 0);
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
+@@ -336,6 +340,7 @@ static void mv_enqueue_new_req(struct ablkcipher_request *req)
+ p->hw_nbytes = req->nbytes;
+ p->complete = mv_crypto_algo_completion;
+ p->process = mv_process_current_q;
++ p->copy_back = 1;
+
+ num_sgs = count_sgs(req->src, req->nbytes);
+ sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG);
+From 0c5c6c4bae8fe9ae3d86b44c332eb1267df1ec99 Mon Sep 17 00:00:00 2001
+From: Uri Simchoni <uri@jdland.co.il>
+Date: Thu, 8 Apr 2010 19:33:26 +0300
+Subject: [PATCH] crypto: mv_cesa - Support processing of data from previous
+ requests
+
+Support processing of data from previous requests (as in hashing
+update/final requests).
+
+Signed-off-by: Uri Simchoni <uri@jdland.co.il>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+---
+ drivers/crypto/mv_cesa.c | 8 +++++---
+ 1 files changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
+index 49a2206..d0fb10e 100644
+--- a/drivers/crypto/mv_cesa.c
++++ b/drivers/crypto/mv_cesa.c
+@@ -184,10 +184,11 @@ static void copy_src_to_buf(struct req_progress *p, char *dbuf, int len)
+ static void setup_data_in(void)
+ {
+ struct req_progress *p = &cpg->p;
+- p->crypt_len =
++ int data_in_sram =
+ min(p->hw_nbytes - p->hw_processed_bytes, cpg->max_req_size);
+- copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START,
+- p->crypt_len);
++ copy_src_to_buf(p, cpg->sram + SRAM_DATA_IN_START + p->crypt_len,
++ data_in_sram - p->crypt_len);
++ p->crypt_len = data_in_sram;
+ }
+
+ static void mv_process_current_q(int first_block)
+@@ -298,6 +299,7 @@ static void dequeue_complete_req(void)
+ } while (need_copy_len > 0);
+ }
+
++ cpg->p.crypt_len = 0;
+
+ BUG_ON(cpg->eng_st != ENGINE_W_DEQUEUE);
+ if (cpg->p.hw_processed_bytes < cpg->p.hw_nbytes) {
+--
+1.7.6.5