/*
 * Cryptographic API.
 *
 * Support for SAHARA cryptographic accelerator.
 *
 * Copyright (c) 2014 Steffen Trumtrar <s.trumtrar@pengutronix.de>
 * Copyright (c) 2013 Vista Silicon S.L.
 * Author: Javier Martin <javier.martin@vista-silicon.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Based on omap-aes.c and tegra-aes.c
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define SHA_BUFFER_LEN		PAGE_SIZE
#define SAHARA_MAX_SHA_BLOCK_SIZE	SHA256_BLOCK_SIZE

#define SAHARA_NAME "sahara"
#define SAHARA_VERSION_3	3
#define SAHARA_VERSION_4	4
#define SAHARA_TIMEOUT_MS	1000
#define SAHARA_MAX_HW_DESC	2
#define SAHARA_MAX_HW_LINK	20

#define FLAGS_MODE_MASK		0x000f
#define FLAGS_ENCRYPT		BIT(0)
#define FLAGS_CBC		BIT(1)
#define FLAGS_NEW_KEY		BIT(3)

#define SAHARA_HDR_BASE			0x00800000
#define SAHARA_HDR_SKHA_ALG_AES		0
#define SAHARA_HDR_SKHA_OP_ENC		(1 << 2)
#define SAHARA_HDR_SKHA_MODE_ECB	(0 << 3)
#define SAHARA_HDR_SKHA_MODE_CBC	(1 << 3)
#define SAHARA_HDR_FORM_DATA		(5 << 16)
#define SAHARA_HDR_FORM_KEY		(8 << 16)
#define SAHARA_HDR_LLO			(1 << 24)
#define SAHARA_HDR_CHA_SKHA		(1 << 28)
#define SAHARA_HDR_CHA_MDHA		(2 << 28)
#define SAHARA_HDR_PARITY_BIT		(1 << 31)

#define SAHARA_HDR_MDHA_SET_MODE_MD_KEY	0x20880000
#define SAHARA_HDR_MDHA_SET_MODE_HASH	0x208D0000
#define SAHARA_HDR_MDHA_HASH		0xA0850000
#define SAHARA_HDR_MDHA_STORE_DIGEST	0x20820000
#define SAHARA_HDR_MDHA_ALG_SHA1	0
#define SAHARA_HDR_MDHA_ALG_MD5		1
#define SAHARA_HDR_MDHA_ALG_SHA256	2
#define SAHARA_HDR_MDHA_ALG_SHA224	3
#define SAHARA_HDR_MDHA_PDATA		(1 << 2)
#define SAHARA_HDR_MDHA_HMAC		(1 << 3)
#define SAHARA_HDR_MDHA_INIT		(1 << 5)
#define SAHARA_HDR_MDHA_IPAD		(1 << 6)
#define SAHARA_HDR_MDHA_OPAD		(1 << 7)
#define SAHARA_HDR_MDHA_SWAP		(1 << 8)
#define SAHARA_HDR_MDHA_MAC_FULL	(1 << 9)
#define SAHARA_HDR_MDHA_SSL		(1 << 10)

/* SAHARA can only process one request at a time */
#define SAHARA_QUEUE_LENGTH	1

#define SAHARA_REG_VERSION	0x00
#define SAHARA_REG_DAR		0x04
#define SAHARA_REG_CONTROL	0x08
#define SAHARA_CONTROL_SET_THROTTLE(x)	(((x) & 0xff) << 24)
#define SAHARA_CONTROL_SET_MAXBURST(x)	(((x) & 0xff) << 16)
#define SAHARA_CONTROL_RNG_AUTORSD	(1 << 7)
#define SAHARA_CONTROL_ENABLE_INT	(1 << 4)
#define SAHARA_REG_CMD		0x0C
#define SAHARA_CMD_RESET		(1 << 0)
#define SAHARA_CMD_CLEAR_INT		(1 << 8)
#define SAHARA_CMD_CLEAR_ERR		(1 << 9)
#define SAHARA_CMD_SINGLE_STEP		(1 << 10)
#define SAHARA_CMD_MODE_BATCH		(1 << 16)
#define SAHARA_CMD_MODE_DEBUG		(1 << 18)
#define SAHARA_REG_STATUS	0x10
#define SAHARA_STATUS_GET_STATE(x)	((x) & 0x7)
#define SAHARA_STATE_IDLE		0
#define SAHARA_STATE_BUSY		1
#define SAHARA_STATE_ERR		2
#define SAHARA_STATE_FAULT		3
#define SAHARA_STATE_COMPLETE		4
#define SAHARA_STATE_COMP_FLAG		(1 << 2)
#define SAHARA_STATUS_DAR_FULL		(1 << 3)
#define SAHARA_STATUS_ERROR		(1 << 4)
#define SAHARA_STATUS_SECURE		(1 << 5)
#define SAHARA_STATUS_FAIL		(1 << 6)
#define SAHARA_STATUS_INIT		(1 << 7)
#define SAHARA_STATUS_RNG_RESEED	(1 << 8)
#define SAHARA_STATUS_ACTIVE_RNG	(1 << 9)
#define SAHARA_STATUS_ACTIVE_MDHA	(1 << 10)
#define SAHARA_STATUS_ACTIVE_SKHA	(1 << 11)
#define SAHARA_STATUS_MODE_BATCH	(1 << 16)
#define SAHARA_STATUS_MODE_DEDICATED	(1 << 17)
#define SAHARA_STATUS_MODE_DEBUG	(1 << 18)
#define SAHARA_STATUS_GET_ISTATE(x)	(((x) >> 24) & 0xff)
#define SAHARA_REG_ERRSTATUS	0x14
#define SAHARA_ERRSTATUS_GET_SOURCE(x)	((x) & 0xf)
#define SAHARA_ERRSOURCE_CHA	14
#define SAHARA_ERRSOURCE_DMA	15
#define SAHARA_ERRSTATUS_DMA_DIR	(1 << 8)
#define SAHARA_ERRSTATUS_GET_DMASZ(x)	(((x) >> 9) & 0x3)
#define SAHARA_ERRSTATUS_GET_DMASRC(x)	(((x) >> 13) & 0x7)
#define SAHARA_ERRSTATUS_GET_CHASRC(x)	(((x) >> 16) & 0xfff)
#define SAHARA_ERRSTATUS_GET_CHAERR(x)	(((x) >> 28) & 0x3)
#define SAHARA_REG_FADDR	0x18
#define SAHARA_REG_CDAR		0x1C
#define SAHARA_REG_IDAR		0x20

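/*
 * The hardware processes a chain of descriptors whose physical address
 * is written to the DAR register. Each descriptor carries two buffer
 * pointers (p1/p2 with byte counts len1/len2) and a physical pointer to
 * the next descriptor (0 ends the chain). A pointer either references a
 * buffer directly (the AES key descriptor sets SAHARA_HDR_LLO and points
 * straight at the IV/key) or the head of a chain of sahara_hw_link
 * structures describing a scattered buffer, e.g.:
 *
 *	DAR -> desc[0] -> desc[1] -> 0
 *	                  p1 -> link -> link -> 0   (input data)
 *	                  p2 -> link -> 0           (output data)
 */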
struct sahara_hw_desc {
	u32		hdr;
	u32		len1;
	dma_addr_t	p1;
	u32		len2;
	dma_addr_t	p2;
	dma_addr_t	next;
};

struct sahara_hw_link {
	u32		len;
	dma_addr_t	p;
	dma_addr_t	next;
};

struct sahara_ctx {
	unsigned long flags;

	/* AES-specific context */
	int keylen;
	u8 key[AES_KEYSIZE_128];
	struct crypto_ablkcipher *fallback;

	/* SHA-specific context */
	struct crypto_shash *shash_fallback;
};

struct sahara_aes_reqctx {
	unsigned long mode;
};

/*
 * struct sahara_sha_reqctx - private data per request
 * @buf: holds data for requests smaller than block_size
 * @rembuf: used to prepare one block_size-aligned request
 * @context: hw-specific context for request. Digest is extracted from this
 * @mutex: serializes enqueueing of this request
 * @mode: specifies what type of hw-descriptor needs to be built
 * @digest_size: length of digest for this request
 * @context_size: length of hw-context for this request.
 *                Always digest_size + 4
 * @buf_cnt: number of bytes saved in buf
 * @sg_in_idx: number of hw links
 * @in_sg: scatterlist for input data
 * @in_sg_chain: scatterlists for chained input data
 * @total: total number of bytes for transfer
 * @last: is this the last block
 * @first: is this the first block
 * @active: inside a transfer
 */
struct sahara_sha_reqctx {
	u8			buf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
	u8			context[SHA256_DIGEST_SIZE + 4];
	struct mutex		mutex;
	unsigned int		mode;
	unsigned int		digest_size;
	unsigned int		context_size;
	unsigned int		buf_cnt;
	unsigned int		sg_in_idx;
	struct scatterlist	*in_sg;
	struct scatterlist	in_sg_chain[2];
	size_t			total;
	unsigned int		last;
	unsigned int		first;
	unsigned int		active;
};

struct sahara_dev {
	struct device		*device;
	unsigned int		version;
	void __iomem		*regs_base;
	struct clk		*clk_ipg;
	struct clk		*clk_ahb;
	struct mutex		queue_mutex;
	struct task_struct	*kthread;
	struct completion	dma_completion;

	struct sahara_ctx	*ctx;
	spinlock_t		lock;
	struct crypto_queue	queue;
	unsigned long		flags;

	struct sahara_hw_desc	*hw_desc[SAHARA_MAX_HW_DESC];
	dma_addr_t		hw_phys_desc[SAHARA_MAX_HW_DESC];

	u8			*key_base;
	dma_addr_t		key_phys_base;

	u8			*iv_base;
	dma_addr_t		iv_phys_base;

	u8			*context_base;
	dma_addr_t		context_phys_base;

	struct sahara_hw_link	*hw_link[SAHARA_MAX_HW_LINK];
	dma_addr_t		hw_phys_link[SAHARA_MAX_HW_LINK];

	size_t			total;
	struct scatterlist	*in_sg;
	unsigned int		nb_in_sg;
	struct scatterlist	*out_sg;
	unsigned int		nb_out_sg;

	u32			error;
};

static struct sahara_dev *dev_ptr;

static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
{
	writel(data, dev->regs_base + reg);
}

static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
{
	return readl(dev->regs_base + reg);
}

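/*
 * The driver keeps every header word at odd parity over its 32 bits,
 * recorded in bit 31 (SAHARA_HDR_PARITY_BIT). The base key header below
 * already contains the parity bit, and each optional flag that is ORed
 * in sets exactly one more bit, so the parity bit is toggled again to
 * keep the count of set bits odd. For example:
 *
 *	hdr = BASE | ALG_AES | FORM_KEY | LLO | CHA_SKHA | PARITY_BIT;
 *	hdr |= SAHARA_HDR_SKHA_MODE_CBC;   (bit count now even)
 *	hdr ^= SAHARA_HDR_PARITY_BIT;      (odd again)
 *
 * The SHA path computes the same property generically with
 * hweight_long() in sahara_sha_init_hdr().
 */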
static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
{
	u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
		  SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
		  SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;

	if (dev->flags & FLAGS_CBC) {
		hdr |= SAHARA_HDR_SKHA_MODE_CBC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	if (dev->flags & FLAGS_ENCRYPT) {
		hdr |= SAHARA_HDR_SKHA_OP_ENC;
		hdr ^= SAHARA_HDR_PARITY_BIT;
	}

	return hdr;
}

static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
{
	return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
	       SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
}

static const char *sahara_err_src[16] = {
	"No error",
	"Header error",
	"Descriptor length error",
	"Descriptor length or pointer error",
	"Link length error",
	"Link pointer error",
	"Input buffer error",
	"Output buffer error",
	"Output buffer starvation",
	"Internal state fault",
	"General descriptor problem",
	"Reserved",
	"Descriptor address error",
	"Link address error",
	"CHA error",
	"DMA error"
};

static const char *sahara_err_dmasize[4] = {
	"Byte transfer",
	"Half-word transfer",
	"Word transfer",
	"Reserved"
};

static const char *sahara_err_dmasrc[8] = {
	"No error",
	"AHB bus error",
	"Internal IP bus error",
	"Parity error",
	"DMA crosses 256 byte boundary",
	"DMA is busy",
	"Reserved",
	"DMA HW error"
};

static const char *sahara_cha_errsrc[12] = {
	"Input buffer non-empty",
	"Illegal address",
	"Illegal mode",
	"Illegal data size",
	"Illegal key size",
	"Write during processing",
	"CTX read during processing",
	"HW error",
	"Input buffer disabled/underflow",
	"Output buffer disabled/overflow",
	"DES key parity error",
	"Reserved"
};

static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };

static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
{
	u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
	u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));

	dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);

	dev_err(dev->device, " - %s.\n", sahara_err_src[source]);

	if (source == SAHARA_ERRSOURCE_DMA) {
		if (error & SAHARA_ERRSTATUS_DMA_DIR)
			dev_err(dev->device, " * DMA read.\n");
		else
			dev_err(dev->device, " * DMA write.\n");

		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
		dev_err(dev->device, " * %s.\n",
			sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
	} else if (source == SAHARA_ERRSOURCE_CHA) {
		dev_err(dev->device, " * %s.\n",
			sahara_cha_errsrc[chasrc]);
		dev_err(dev->device, " * %s.\n",
			sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
	}
	dev_err(dev->device, "\n");
}

static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };

static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
{
	u8 state;

	if (!IS_ENABLED(DEBUG))
		return;

	state = SAHARA_STATUS_GET_STATE(status);

	dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
		__func__, status);

	dev_dbg(dev->device, " - State = %d:\n", state);
	if (state & SAHARA_STATE_COMP_FLAG)
		dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");

	dev_dbg(dev->device, " * %s.\n",
		sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);

	if (status & SAHARA_STATUS_DAR_FULL)
		dev_dbg(dev->device, " - DAR Full.\n");
	if (status & SAHARA_STATUS_ERROR)
		dev_dbg(dev->device, " - Error.\n");
	if (status & SAHARA_STATUS_SECURE)
		dev_dbg(dev->device, " - Secure.\n");
	if (status & SAHARA_STATUS_FAIL)
		dev_dbg(dev->device, " - Fail.\n");
	if (status & SAHARA_STATUS_RNG_RESEED)
		dev_dbg(dev->device, " - RNG Reseed Request.\n");
	if (status & SAHARA_STATUS_ACTIVE_RNG)
		dev_dbg(dev->device, " - RNG Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_MDHA)
		dev_dbg(dev->device, " - MDHA Active.\n");
	if (status & SAHARA_STATUS_ACTIVE_SKHA)
		dev_dbg(dev->device, " - SKHA Active.\n");

	if (status & SAHARA_STATUS_MODE_BATCH)
		dev_dbg(dev->device, " - Batch Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEDICATED)
		dev_dbg(dev->device, " - Dedicated Mode.\n");
	else if (status & SAHARA_STATUS_MODE_DEBUG)
		dev_dbg(dev->device, " - Debug Mode.\n");

	dev_dbg(dev->device, " - Internal state = 0x%02x\n",
		SAHARA_STATUS_GET_ISTATE(status));

	dev_dbg(dev->device, "Current DAR: 0x%08x\n",
		sahara_read(dev, SAHARA_REG_CDAR));
	dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
		sahara_read(dev, SAHARA_REG_IDAR));
}

static void sahara_dump_descriptors(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
		dev_dbg(dev->device, "Descriptor (%d) (0x%08x):\n",
			i, dev->hw_phys_desc[i]);
		dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
		dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
		dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
		dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
		dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_desc[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

static void sahara_dump_links(struct sahara_dev *dev)
{
	int i;

	if (!IS_ENABLED(DEBUG))
		return;

	for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
		dev_dbg(dev->device, "Link (%d) (0x%08x):\n",
			i, dev->hw_phys_link[i]);
		dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
		dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
		dev_dbg(dev->device, "\tnext = 0x%08x\n",
			dev->hw_link[i]->next);
	}
	dev_dbg(dev->device, "\n");
}

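/*
 * Build the descriptor chain for one AES request. When a new key must
 * be loaded (and, for CBC, a new IV), a key descriptor is linked in
 * front of the data descriptor; a sketch of the FLAGS_NEW_KEY + CBC
 * case:
 *
 *	desc[0]: key header,  p1 -> IV (16 bytes), p2 -> key, next -> desc[1]
 *	desc[1]: data header, p1 -> input links, p2 -> output links, next = 0
 */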
static int sahara_hw_descriptor_create(struct sahara_dev *dev)
{
	struct sahara_ctx *ctx = dev->ctx;
	struct scatterlist *sg;
	int ret;
	int i, j;
	int idx = 0;

	/* Copy new key if necessary */
	if (ctx->flags & FLAGS_NEW_KEY) {
		memcpy(dev->key_base, ctx->key, ctx->keylen);
		ctx->flags &= ~FLAGS_NEW_KEY;

		if (dev->flags & FLAGS_CBC) {
			dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
			dev->hw_desc[idx]->p1 = dev->iv_phys_base;
		} else {
			dev->hw_desc[idx]->len1 = 0;
			dev->hw_desc[idx]->p1 = 0;
		}
		dev->hw_desc[idx]->len2 = ctx->keylen;
		dev->hw_desc[idx]->p2 = dev->key_phys_base;
		dev->hw_desc[idx]->next = dev->hw_phys_desc[1];

		dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);

		idx++;
	}

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
	dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
	if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			 DMA_TO_DEVICE);
	if (ret != dev->nb_in_sg) {
		dev_err(dev->device, "couldn't map in sg\n");
		return -EINVAL;
	}
	ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
			 DMA_FROM_DEVICE);
	if (ret != dev->nb_out_sg) {
		dev_err(dev->device, "couldn't map out sg\n");
		goto unmap_in;
	}

	/* Create input links */
	dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
	sg = dev->in_sg;
	for (i = 0; i < dev->nb_in_sg; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	/* Create output links */
	dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
	sg = dev->out_sg;
	for (j = i; j < dev->nb_out_sg + i; j++) {
		dev->hw_link[j]->len = sg->length;
		dev->hw_link[j]->p = sg->dma_address;
		if (j == (dev->nb_out_sg + i - 1)) {
			dev->hw_link[j]->next = 0;
		} else {
			dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
			sg = sg_next(sg);
		}
	}

	/* Fill remaining fields of hw_desc[1] */
	dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
	dev->hw_desc[idx]->len1 = dev->total;
	dev->hw_desc[idx]->len2 = dev->total;
	dev->hw_desc[idx]->next = 0;

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	return 0;

unmap_in:
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return -EINVAL;
}

static int sahara_aes_process(struct ablkcipher_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_ctx *ctx;
	struct sahara_aes_reqctx *rctx;
	int ret;
	unsigned long timeout;

	/* Request is ready to be dispatched by the device */
	dev_dbg(dev->device,
		"dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		req->nbytes, req->src, req->dst);

	/* assign new request to device */
	dev->total = req->nbytes;
	dev->in_sg = req->src;
	dev->out_sg = req->dst;

	rctx = ablkcipher_request_ctx(req);
	ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
	rctx->mode &= FLAGS_MODE_MASK;
	dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;

	if ((dev->flags & FLAGS_CBC) && req->info)
		memcpy(dev->iv_base, req->info, AES_KEYSIZE_128);

	/* assign new context to device */
	dev->ctx = ctx;

	reinit_completion(&dev->dma_completion);

	ret = sahara_hw_descriptor_create(dev);
	if (ret)
		return -EINVAL;

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "AES timeout\n");
		return -ETIMEDOUT;
	}

	dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
		     DMA_FROM_DEVICE);
	dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
		     DMA_TO_DEVICE);

	return 0;
}

static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			     unsigned int keylen)
{
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	int ret;

	ctx->keylen = keylen;

	/* SAHARA only supports 128bit keys */
	if (keylen == AES_KEYSIZE_128) {
		memcpy(ctx->key, key, keylen);
		ctx->flags |= FLAGS_NEW_KEY;
		return 0;
	}

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW, do a fallback.
	 */
	ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	ctx->fallback->base.crt_flags |=
		(tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);

	ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
	if (ret) {
		struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);

		tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm_aux->crt_flags |=
			(ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
	struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int err = 0;

	dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
		req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));

	if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
		dev_err(dev->device,
			"request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	rctx->mode = mode;

	mutex_lock(&dev->queue_mutex);
	err = ablkcipher_enqueue_request(&dev->queue, req);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);

	return err;
}

static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT);
}

static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, 0);
}

static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_encrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
{
	struct crypto_tfm *tfm =
		crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
	struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
		crypto_ablkcipher_reqtfm(req));
	int err;

	if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
		ablkcipher_request_set_tfm(req, ctx->fallback);
		err = crypto_ablkcipher_decrypt(req);
		ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
		return err;
	}

	return sahara_aes_crypt(req, FLAGS_CBC);
}

static int sahara_aes_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->fallback = crypto_alloc_ablkcipher(name, 0,
				CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->fallback);
	}

	tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);

	return 0;
}

static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	if (ctx->fallback)
		crypto_free_ablkcipher(ctx->fallback);
	ctx->fallback = NULL;
}

static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
			       struct sahara_sha_reqctx *rctx)
{
	u32 hdr = 0;

	hdr = rctx->mode;

	if (rctx->first) {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
		hdr |= SAHARA_HDR_MDHA_INIT;
	} else {
		hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
	}

	if (rctx->last)
		hdr |= SAHARA_HDR_MDHA_PDATA;

	if (hweight_long(hdr) % 2 == 0)
		hdr |= SAHARA_HDR_PARITY_BIT;

	return hdr;
}

static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				      struct sahara_sha_reqctx *rctx,
				      int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg);
		return -EINVAL;
	}

	sg = dev->in_sg;
	ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
	if (!ret)
		return -EFAULT;

	for (i = start; i < dev->nb_in_sg + start; i++) {
		dev->hw_link[i]->len = sg->length;
		dev->hw_link[i]->p = sg->dma_address;
		if (i == (dev->nb_in_sg + start - 1)) {
			dev->hw_link[i]->next = 0;
		} else {
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
		}
	}

	return i;
}

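/*
 * Data descriptor aka #8 (first request) or #10 (subsequent requests)
 *
 * p1: input data (link chain), or NULL when only the context is hashed
 * p2: hw context, read back after completion to save the running state
 */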
static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
						struct sahara_sha_reqctx *rctx,
						struct ahash_request *req,
						int index)
{
	unsigned result_len;
	int i = index;

	if (rctx->first)
		/* Create initial descriptor: #8 */
		dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
	else
		/* Create hash descriptor: #10. Must follow #6. */
		dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;

	dev->hw_desc[index]->len1 = rctx->total;
	if (dev->hw_desc[index]->len1 == 0) {
		/* if len1 is 0, p1 must be 0, too */
		dev->hw_desc[index]->p1 = 0;
		rctx->sg_in_idx = 0;
	} else {
		/* Create input links */
		dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
		i = sahara_sha_hw_links_create(dev, rctx, index);

		rctx->sg_in_idx = index;
		if (i < 0)
			return i;
	}

	dev->hw_desc[index]->p2 = dev->hw_phys_link[i];

	/* Save the context for the next operation */
	result_len = rctx->context_size;
	dev->hw_link[i]->p = dev->context_phys_base;

	dev->hw_link[i]->len = result_len;
	dev->hw_desc[index]->len2 = result_len;

	dev->hw_link[i]->next = 0;

	return 0;
}

/*
 * Load descriptor aka #6
 *
 * To load a previously saved context back to the MDHA unit
 *
 * p1: Saved Context
 * p2: NULL
 *
 */
static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
						   struct sahara_sha_reqctx *rctx,
						   struct ahash_request *req,
						   int index)
{
	dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);

	dev->hw_desc[index]->len1 = rctx->context_size;
	dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
	dev->hw_desc[index]->len2 = 0;
	dev->hw_desc[index]->p2 = 0;

	dev->hw_link[index]->len = rctx->context_size;
	dev->hw_link[index]->p = dev->context_phys_base;
	dev->hw_link[index]->next = 0;

	return 0;
}

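/*
 * Truncate a scatterlist so that it describes exactly @nbytes: the
 * entry containing the final byte is shortened and marked as the end
 * of the list.
 */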
static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
{
	if (!sg || !sg->length)
		return nbytes;

	while (nbytes && sg) {
		if (nbytes <= sg->length) {
			sg->length = nbytes;
			sg_mark_end(sg);
			break;
		}
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return nbytes;
}

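/*
 * Stage the data of one hash request for the hardware. Only the last
 * transfer may be padded by the MDHA unit, so every other transfer has
 * to be a whole number of blocks: short updates are accumulated in
 * rctx->buf, and a non-aligned tail is carved off into the buffer for
 * the next call. When buffered bytes and fresh request data have to be
 * hashed together, rembuf is chained in front of req->src:
 *
 *	in_sg_chain[0] -> rembuf (buf_cnt bytes)
 *	in_sg_chain[1] -> req->src (trimmed to a block multiple)
 *
 * Returns 0 when everything was buffered and no hardware pass is
 * needed, -EINPROGRESS when a transfer has been set up.
 */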
static int sahara_sha_prepare_request(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned int hash_later;
	unsigned int block_size;
	unsigned int len;

	block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	/* append bytes from previous operation */
	len = rctx->buf_cnt + req->nbytes;

	/* only the last transfer can be padded in hardware */
	if (!rctx->last && (len < block_size)) {
		/* too little data, save for next operation */
		scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
					 0, req->nbytes, 0);
		rctx->buf_cnt += req->nbytes;

		return 0;
	}

	/* add data from previous operation first */
	if (rctx->buf_cnt)
		memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);

	/* data must always be a multiple of block_size */
	hash_later = rctx->last ? 0 : len & (block_size - 1);
	if (hash_later) {
		unsigned int offset = req->nbytes - hash_later;
		/* Save remaining bytes for later use */
		scatterwalk_map_and_copy(rctx->buf, req->src, offset,
					 hash_later, 0);
	}

	/* nbytes should now be multiple of blocksize */
	req->nbytes = req->nbytes - hash_later;

	sahara_walk_and_recalc(req->src, req->nbytes);

	/* have data from previous operation and current */
	if (rctx->buf_cnt && req->nbytes) {
		sg_init_table(rctx->in_sg_chain, 2);
		sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);

		sg_chain(rctx->in_sg_chain, 2, req->src);

		rctx->total = req->nbytes + rctx->buf_cnt;
		rctx->in_sg = rctx->in_sg_chain;

		req->src = rctx->in_sg_chain;
	/* only data from previous operation */
	} else if (rctx->buf_cnt) {
		if (req->src)
			rctx->in_sg = req->src;
		else
			rctx->in_sg = rctx->in_sg_chain;
		/* buf was copied into rembuf above */
		sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
		rctx->total = rctx->buf_cnt;
	/* no data from previous operation */
	} else {
		rctx->in_sg = req->src;
		rctx->total = req->nbytes;
		req->src = rctx->in_sg;
	}

	/* on next call, we only have the remaining data in the buffer */
	rctx->buf_cnt = hash_later;

	return -EINPROGRESS;
}

static int sahara_sha_process(struct ahash_request *req)
{
	struct sahara_dev *dev = dev_ptr;
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	int ret;
	unsigned long timeout;

	ret = sahara_sha_prepare_request(req);
	if (!ret)
		return ret;

	if (rctx->first) {
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = 0;
		rctx->first = 0;
	} else {
		memcpy(dev->context_base, rctx->context, rctx->context_size);

		sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
		dev->hw_desc[0]->next = dev->hw_phys_desc[1];
		sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
		dev->hw_desc[1]->next = 0;
	}

	sahara_dump_descriptors(dev);
	sahara_dump_links(dev);

	reinit_completion(&dev->dma_completion);

	sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);

	timeout = wait_for_completion_timeout(&dev->dma_completion,
				msecs_to_jiffies(SAHARA_TIMEOUT_MS));
	if (!timeout) {
		dev_err(dev->device, "SHA timeout\n");
		return -ETIMEDOUT;
	}

	if (rctx->sg_in_idx)
		dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
			     DMA_TO_DEVICE);

	memcpy(rctx->context, dev->context_base, rctx->context_size);

	if (req->result)
		memcpy(req->result, rctx->context, rctx->digest_size);

	return 0;
}

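/*
 * A single kthread dispatches all requests, matching the hardware's
 * one-request-at-a-time limitation: it sleeps while the crypto queue is
 * empty, is woken by the enqueue paths, and processes each request
 * synchronously (descriptor setup, DAR write, then waiting for the DMA
 * completion signalled by the IRQ handler).
 */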
static int sahara_queue_manage(void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	struct crypto_async_request *async_req;
	struct crypto_async_request *backlog;
	int ret = 0;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&dev->queue_mutex);
		backlog = crypto_get_backlog(&dev->queue);
		async_req = crypto_dequeue_request(&dev->queue);
		mutex_unlock(&dev->queue_mutex);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		if (async_req) {
			if (crypto_tfm_alg_type(async_req->tfm) ==
			    CRYPTO_ALG_TYPE_AHASH) {
				struct ahash_request *req =
					ahash_request_cast(async_req);

				ret = sahara_sha_process(req);
			} else {
				struct ablkcipher_request *req =
					ablkcipher_request_cast(async_req);

				ret = sahara_aes_process(req);
			}

			async_req->complete(async_req, ret);

			continue;
		}

		schedule();
	} while (!kthread_should_stop());

	return 0;
}

static int sahara_sha_enqueue(struct ahash_request *req, int last)
{
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
	struct sahara_dev *dev = dev_ptr;
	int ret;

	if (!req->nbytes && !last)
		return 0;

	mutex_lock(&rctx->mutex);
	rctx->last = last;

	if (!rctx->active) {
		rctx->active = 1;
		rctx->first = 1;
	}

	mutex_lock(&dev->queue_mutex);
	ret = crypto_enqueue_request(&dev->queue, &req->base);
	mutex_unlock(&dev->queue_mutex);

	wake_up_process(dev->kthread);
	mutex_unlock(&rctx->mutex);

	return ret;
}

static int sahara_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memset(rctx, 0, sizeof(*rctx));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
		rctx->digest_size = SHA1_DIGEST_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
		rctx->digest_size = SHA256_DIGEST_SIZE;
		break;
	default:
		return -EINVAL;
	}

	rctx->context_size = rctx->digest_size + 4;
	rctx->active = 0;

	mutex_init(&rctx->mutex);

	return 0;
}

static int sahara_sha_update(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 0);
}

static int sahara_sha_final(struct ahash_request *req)
{
	req->nbytes = 0;
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_finup(struct ahash_request *req)
{
	return sahara_sha_enqueue(req, 1);
}

static int sahara_sha_digest(struct ahash_request *req)
{
	sahara_sha_init(req);

	return sahara_sha_finup(req);
}

static int sahara_sha_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(struct sahara_ctx));
	memcpy(out + sizeof(struct sahara_ctx), rctx,
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
	struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(struct sahara_ctx));
	memcpy(rctx, in + sizeof(struct sahara_ctx),
	       sizeof(struct sahara_sha_reqctx));

	return 0;
}

static int sahara_sha_cra_init(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->shash_fallback = crypto_alloc_shash(name, 0,
					CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->shash_fallback)) {
		pr_err("Error allocating fallback algo %s\n", name);
		return PTR_ERR(ctx->shash_fallback);
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct sahara_sha_reqctx) +
				 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);

	return 0;
}

static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
{
	struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->shash_fallback);
	ctx->shash_fallback = NULL;
}

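/*
 * The algorithms below are reached through the generic crypto API, not
 * by calling this driver directly. A minimal sketch of a kernel-side
 * user of the "cbc(aes)" cipher via this kernel's ablkcipher interface
 * (callback wiring and error handling omitted; src_sg/dst_sg/key/iv are
 * placeholders):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);   (may return -EINPROGRESS)
 *	ablkcipher_request_free(req);
 *	crypto_free_ablkcipher(tfm);
 */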
static struct crypto_alg aes_algs[] = {
{
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "sahara-ecb-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_ecb_encrypt,
		.decrypt	= sahara_aes_ecb_decrypt,
	}
}, {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "sahara-cbc-aes",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
				  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct sahara_ctx),
	.cra_alignmask		= 0x0,
	.cra_type		= &crypto_ablkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= sahara_aes_cra_init,
	.cra_exit		= sahara_aes_cra_exit,
	.cra_u.ablkcipher = {
		.min_keysize	= AES_MIN_KEY_SIZE,
		.max_keysize	= AES_MAX_KEY_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.setkey		= sahara_aes_setkey,
		.encrypt	= sahara_aes_cbc_encrypt,
		.decrypt	= sahara_aes_cbc_decrypt,
	}
}
};

static struct ahash_alg sha_v3_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA1_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sahara-sha1",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

static struct ahash_alg sha_v4_algs[] = {
{
	.init		= sahara_sha_init,
	.update		= sahara_sha_update,
	.final		= sahara_sha_final,
	.finup		= sahara_sha_finup,
	.digest		= sahara_sha_digest,
	.export		= sahara_sha_export,
	.import		= sahara_sha_import,
	.halg.digestsize	= SHA256_DIGEST_SIZE,
	.halg.base	= {
		.cra_name		= "sha256",
		.cra_driver_name	= "sahara-sha256",
		.cra_priority		= 300,
		.cra_flags		= CRYPTO_ALG_TYPE_AHASH |
						CRYPTO_ALG_ASYNC |
						CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize		= SHA256_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct sahara_ctx),
		.cra_alignmask		= 0,
		.cra_module		= THIS_MODULE,
		.cra_init		= sahara_sha_cra_init,
		.cra_exit		= sahara_sha_cra_exit,
	}
},
};

static irqreturn_t sahara_irq_handler(int irq, void *data)
{
	struct sahara_dev *dev = (struct sahara_dev *)data;
	unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
	unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);

	sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
		     SAHARA_REG_CMD);

	sahara_decode_status(dev, stat);

	if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
		return IRQ_NONE;
	} else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
		dev->error = 0;
	} else {
		sahara_decode_error(dev, err);
		dev->error = -EINVAL;
	}

	complete(&dev->dma_completion);

	return IRQ_HANDLED;
}

static int sahara_register_algs(struct sahara_dev *dev)
{
	int err;
	unsigned int i, j, k, l;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
		INIT_LIST_HEAD(&aes_algs[i].cra_list);
		err = crypto_register_alg(&aes_algs[i]);
		if (err)
			goto err_aes_algs;
	}

	for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
		err = crypto_register_ahash(&sha_v3_algs[k]);
		if (err)
			goto err_sha_v3_algs;
	}

	if (dev->version > SAHARA_VERSION_3)
		for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
			err = crypto_register_ahash(&sha_v4_algs[l]);
			if (err)
				goto err_sha_v4_algs;
		}

	return 0;

err_sha_v4_algs:
	for (j = 0; j < l; j++)
		crypto_unregister_ahash(&sha_v4_algs[j]);

err_sha_v3_algs:
	for (j = 0; j < k; j++)
		crypto_unregister_ahash(&sha_v3_algs[j]);

err_aes_algs:
	for (j = 0; j < i; j++)
		crypto_unregister_alg(&aes_algs[j]);

	return err;
}

static void sahara_unregister_algs(struct sahara_dev *dev)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
		crypto_unregister_alg(&aes_algs[i]);

	for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
		crypto_unregister_ahash(&sha_v3_algs[i]);

	if (dev->version > SAHARA_VERSION_3)
		for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
			crypto_unregister_ahash(&sha_v4_algs[i]);
}

static struct platform_device_id sahara_platform_ids[] = {
	{ .name = "sahara-imx27" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(platform, sahara_platform_ids);

static struct of_device_id sahara_dt_ids[] = {
	{ .compatible = "fsl,imx53-sahara" },
	{ .compatible = "fsl,imx27-sahara" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sahara_dt_ids);

static int sahara_probe(struct platform_device *pdev)
{
	struct sahara_dev *dev;
	struct resource *res;
	u32 version;
	int irq;
	int err;
	int i;

	dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&pdev->dev, "unable to alloc data struct.\n");
		return -ENOMEM;
	}

	dev->device = &pdev->dev;
	platform_set_drvdata(pdev, dev);

	/* Get the base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dev->regs_base))
		return PTR_ERR(dev->regs_base);

	/* Get the IRQ */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "failed to get irq resource\n");
		return irq;
	}

	err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
			       0, dev_name(&pdev->dev), dev);
	if (err) {
		dev_err(&pdev->dev, "failed to request irq\n");
		return err;
	}

	/* clocks */
	dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(dev->clk_ipg)) {
		dev_err(&pdev->dev, "Could not get ipg clock\n");
		return PTR_ERR(dev->clk_ipg);
	}

	dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(dev->clk_ahb)) {
		dev_err(&pdev->dev, "Could not get ahb clock\n");
		return PTR_ERR(dev->clk_ahb);
	}

	/* Allocate HW descriptors */
	dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
			&dev->hw_phys_desc[0], GFP_KERNEL);
	if (!dev->hw_desc[0]) {
		dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
		return -ENOMEM;
	}
	dev->hw_desc[1] = dev->hw_desc[0] + 1;
	dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
			sizeof(struct sahara_hw_desc);

	/* Allocate space for iv and key */
	dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
					    &dev->key_phys_base, GFP_KERNEL);
	if (!dev->key_base) {
		dev_err(&pdev->dev, "Could not allocate memory for key\n");
		return -ENOMEM;
	}
	dev->iv_base = dev->key_base + AES_KEYSIZE_128;
	dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;

	/* Allocate space for context: largest digest + message length field */
	dev->context_base = dmam_alloc_coherent(&pdev->dev,
					SHA256_DIGEST_SIZE + 4,
					&dev->context_phys_base, GFP_KERNEL);
	if (!dev->context_base) {
		dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
		return -ENOMEM;
	}

	/* Allocate space for HW links */
	dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
			SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
			&dev->hw_phys_link[0], GFP_KERNEL);
	if (!dev->hw_link[0]) {
		dev_err(&pdev->dev, "Could not allocate hw links\n");
		return -ENOMEM;
	}
	for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
		dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
				sizeof(struct sahara_hw_link);
		dev->hw_link[i] = dev->hw_link[i - 1] + 1;
	}

	crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);

	spin_lock_init(&dev->lock);
	mutex_init(&dev->queue_mutex);

	dev_ptr = dev;

	dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
	if (IS_ERR(dev->kthread))
		return PTR_ERR(dev->kthread);

	init_completion(&dev->dma_completion);

	err = clk_prepare_enable(dev->clk_ipg);
	if (err)
		return err;
	err = clk_prepare_enable(dev->clk_ahb);
	if (err)
		goto clk_ipg_disable;

	version = sahara_read(dev, SAHARA_REG_VERSION);
	if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
		if (version != SAHARA_VERSION_3)
			err = -ENODEV;
	} else if (of_device_is_compatible(pdev->dev.of_node,
					   "fsl,imx53-sahara")) {
		if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
			err = -ENODEV;
		version = (version >> 8) & 0xff;
	}
	if (err == -ENODEV) {
		dev_err(&pdev->dev, "SAHARA version %d not supported\n",
			version);
		goto err_algs;
	}

	dev->version = version;

	sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
		     SAHARA_REG_CMD);
	sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
			SAHARA_CONTROL_SET_MAXBURST(8) |
			SAHARA_CONTROL_RNG_AUTORSD |
			SAHARA_CONTROL_ENABLE_INT,
			SAHARA_REG_CONTROL);

	err = sahara_register_algs(dev);
	if (err)
		goto err_algs;

	dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);

	return 0;

err_algs:
	kthread_stop(dev->kthread);
	dev_ptr = NULL;
	clk_disable_unprepare(dev->clk_ahb);
clk_ipg_disable:
	clk_disable_unprepare(dev->clk_ipg);

	return err;
}

static int sahara_remove(struct platform_device *pdev)
{
	struct sahara_dev *dev = platform_get_drvdata(pdev);

	kthread_stop(dev->kthread);

	sahara_unregister_algs(dev);

	clk_disable_unprepare(dev->clk_ipg);
	clk_disable_unprepare(dev->clk_ahb);

	dev_ptr = NULL;

	return 0;
}

static struct platform_driver sahara_driver = {
	.probe		= sahara_probe,
	.remove		= sahara_remove,
	.driver		= {
		.name	= SAHARA_NAME,
		.of_match_table = sahara_dt_ids,
	},
	.id_table = sahara_platform_ids,
};

module_platform_driver(sahara_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Martin <javier.martin@vista-silicon.com>");
MODULE_AUTHOR("Steffen Trumtrar <s.trumtrar@pengutronix.de>");
MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");