// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <cpu_func.h>
#include <malloc.h>
#include <memalign.h>
#include "jobdesc.h"
#include "desc.h"
#include "jr.h"
#include "fsl_hash.h"
#include <hw_sha.h>
#include <asm/cache.h>
#include <linux/errno.h>
18 | |
/* Maximum length of an algorithm name in driver_hash[] */
#define CRYPTO_MAX_ALG_NAME 80
/* Digest sizes in bytes */
#define SHA1_DIGEST_SIZE 20
#define SHA256_DIGEST_SIZE 32

/* Describes one hash algorithm supported by the CAAM driver. */
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];	/* algorithm name, e.g. "sha1" */
	unsigned int digestsize;	/* digest length in bytes */
	u32 alg_type;			/* CAAM OP_ALG_ALGSEL_* selector */
};

/* Indices into driver_hash[] */
enum caam_hash_algos {
	SHA1 = 0,
	SHA256
};
33 | ||
/* Table of supported hash algorithms, indexed by enum caam_hash_algos. */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.digestsize = SHA1_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA1,
	},
	{
		.name = "sha256",
		.digestsize = SHA256_DIGEST_SIZE,
		.alg_type = OP_ALG_ALGSEL_SHA256,
	},
};
46 | ||
94e3c8c4 | 47 | static enum caam_hash_algos get_hash_type(struct hash_algo *algo) |
48 | { | |
49 | if (!strcmp(algo->name, driver_hash[SHA1].name)) | |
50 | return SHA1; | |
51 | else | |
52 | return SHA256; | |
53 | } | |
54 | ||
55 | /* Create the context for progressive hashing using h/w acceleration. | |
56 | * | |
57 | * @ctxp: Pointer to the pointer of the context for hashing | |
58 | * @caam_algo: Enum for SHA1 or SHA256 | |
59 | * @return 0 if ok, -ENOMEM on error | |
60 | */ | |
61 | static int caam_hash_init(void **ctxp, enum caam_hash_algos caam_algo) | |
62 | { | |
63 | *ctxp = calloc(1, sizeof(struct sha_ctx)); | |
64 | if (*ctxp == NULL) { | |
65 | debug("Cannot allocate memory for context\n"); | |
66 | return -ENOMEM; | |
67 | } | |
68 | return 0; | |
69 | } | |
70 | ||
/*
 * Update sg table for progressive hashing using h/w acceleration
 *
 * Appends one scatter/gather entry describing @buf to the context's SG
 * table; the actual hashing is deferred until caam_hash_finish().
 * The context is freed by this function if an error occurs.
 * We support at most 32 Scatter/Gather Entries.
 *
 * @hash_ctx: Pointer to the context for hashing
 * @buf: Pointer to the buffer being hashed
 * @size: Size of the buffer being hashed
 * @is_last: 1 if this is the last update; 0 otherwise
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -EINVAL on error
 */
static int caam_hash_update(void *hash_ctx, const void *buf,
			    unsigned int size, int is_last,
			    enum caam_hash_algos caam_algo)
{
	uint32_t final = 0;
	/* CAAM works on physical addresses */
	phys_addr_t addr = virt_to_phys((void *)buf);
	struct sha_ctx *ctx = hash_ctx;

	/* SG table is fixed-size: reject (and free the context) when full */
	if (ctx->sg_num >= MAX_SG_32) {
		free(ctx);
		return -EINVAL;
	}

	/* Store the upper/lower halves of the chunk's physical address */
#ifdef CONFIG_PHYS_64BIT
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, (uint32_t)(addr >> 32));
#else
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_hi, 0x0);
#endif
	sec_out32(&ctx->sg_tbl[ctx->sg_num].addr_lo, (uint32_t)addr);

	sec_out32(&ctx->sg_tbl[ctx->sg_num].len_flag,
		  (size & SG_ENTRY_LENGTH_MASK));

	ctx->sg_num++;

	/* On the last chunk, set the FINAL bit on the entry just written */
	if (is_last) {
		final = sec_in32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag) |
			SG_ENTRY_FINAL_BIT;
		sec_out32(&ctx->sg_tbl[ctx->sg_num - 1].len_flag, final);
	}

	return 0;
}
117 | ||
/*
 * Perform progressive hashing on the given buffer and copy hash at
 * destination buffer
 *
 * Builds a job descriptor over the accumulated SG table, runs it on the
 * job ring, and copies the digest out. The context is freed after
 * completion of the hash operation (on both success and error paths).
 *
 * @hash_ctx: Pointer to the context for hashing
 * @dest_buf: Pointer to the destination buffer where hash is to be copied
 * @size: Size of the destination buffer; must be at least the digest size
 * @caam_algo: Enum for SHA1 or SHA256
 * @return 0 if ok, -EINVAL on error
 */
static int caam_hash_finish(void *hash_ctx, void *dest_buf,
			    int size, enum caam_hash_algos caam_algo)
{
	uint32_t len = 0;
	struct sha_ctx *ctx = hash_ctx;
	int i = 0, ret = 0;

	/* The caller's buffer must be able to hold the full digest */
	if (size < driver_hash[caam_algo].digestsize) {
		free(ctx);
		return -EINVAL;
	}

	/* Total input length = sum of all SG entry lengths */
	for (i = 0; i < ctx->sg_num; i++)
		len += (sec_in32(&ctx->sg_tbl[i].len_flag) &
			SG_ENTRY_LENGTH_MASK);

	/* Final argument 1 selects SG-table (progressive) mode */
	inline_cnstr_jobdesc_hash(ctx->sha_desc, (uint8_t *)ctx->sg_tbl, len,
				  ctx->hash,
				  driver_hash[caam_algo].alg_type,
				  driver_hash[caam_algo].digestsize,
				  1);

	ret = run_descriptor_jr(ctx->sha_desc);

	if (ret)
		debug("Error %x\n", ret);
	else
		memcpy(dest_buf, ctx->hash, sizeof(ctx->hash));

	free(ctx);
	return ret;
}
162 | ||
b9eebfad RG |
163 | int caam_hash(const unsigned char *pbuf, unsigned int buf_len, |
164 | unsigned char *pout, enum caam_hash_algos algo) | |
165 | { | |
166 | int ret = 0; | |
167 | uint32_t *desc; | |
d7af2baa | 168 | unsigned int size; |
b9eebfad | 169 | |
d7af2baa | 170 | desc = malloc_cache_aligned(sizeof(int) * MAX_CAAM_DESCSIZE); |
b9eebfad RG |
171 | if (!desc) { |
172 | debug("Not enough memory for descriptor allocation\n"); | |
94e3c8c4 | 173 | return -ENOMEM; |
b9eebfad RG |
174 | } |
175 | ||
d7af2baa BL |
176 | if (!IS_ALIGNED((uintptr_t)pbuf, ARCH_DMA_MINALIGN) || |
177 | !IS_ALIGNED((uintptr_t)pout, ARCH_DMA_MINALIGN)) { | |
178 | puts("Error: Address arguments are not aligned\n"); | |
179 | return -EINVAL; | |
180 | } | |
181 | ||
182 | size = ALIGN(buf_len, ARCH_DMA_MINALIGN); | |
183 | flush_dcache_range((unsigned long)pbuf, (unsigned long)pbuf + size); | |
184 | ||
b9eebfad RG |
185 | inline_cnstr_jobdesc_hash(desc, pbuf, buf_len, pout, |
186 | driver_hash[algo].alg_type, | |
187 | driver_hash[algo].digestsize, | |
188 | 0); | |
189 | ||
d7af2baa BL |
190 | size = ALIGN(sizeof(int) * MAX_CAAM_DESCSIZE, ARCH_DMA_MINALIGN); |
191 | flush_dcache_range((unsigned long)desc, (unsigned long)desc + size); | |
192 | ||
b9eebfad RG |
193 | ret = run_descriptor_jr(desc); |
194 | ||
d7af2baa BL |
195 | size = ALIGN(driver_hash[algo].digestsize, ARCH_DMA_MINALIGN); |
196 | invalidate_dcache_range((unsigned long)pout, | |
197 | (unsigned long)pout + size); | |
198 | ||
b9eebfad RG |
199 | free(desc); |
200 | return ret; | |
201 | } | |
202 | ||
203 | void hw_sha256(const unsigned char *pbuf, unsigned int buf_len, | |
204 | unsigned char *pout, unsigned int chunk_size) | |
205 | { | |
206 | if (caam_hash(pbuf, buf_len, pout, SHA256)) | |
207 | printf("CAAM was not setup properly or it is faulty\n"); | |
208 | } | |
209 | ||
210 | void hw_sha1(const unsigned char *pbuf, unsigned int buf_len, | |
211 | unsigned char *pout, unsigned int chunk_size) | |
212 | { | |
213 | if (caam_hash(pbuf, buf_len, pout, SHA1)) | |
214 | printf("CAAM was not setup properly or it is faulty\n"); | |
215 | } | |
94e3c8c4 | 216 | |
217 | int hw_sha_init(struct hash_algo *algo, void **ctxp) | |
218 | { | |
219 | return caam_hash_init(ctxp, get_hash_type(algo)); | |
220 | } | |
221 | ||
222 | int hw_sha_update(struct hash_algo *algo, void *ctx, const void *buf, | |
223 | unsigned int size, int is_last) | |
224 | { | |
225 | return caam_hash_update(ctx, buf, size, is_last, get_hash_type(algo)); | |
226 | } | |
227 | ||
228 | int hw_sha_finish(struct hash_algo *algo, void *ctx, void *dest_buf, | |
229 | int size) | |
230 | { | |
231 | return caam_hash_finish(ctx, dest_buf, size, get_hash_type(algo)); | |
232 | } |