// SPDX-License-Identifier: GPL-2.0-only
/*
 * Provide TDMA helper functions used by cipher and hash algorithm
 * implementations.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include "cesa.h"
15 | bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter, | |
16 | struct mv_cesa_sg_dma_iter *sgiter, | |
17 | unsigned int len) | |
18 | { | |
19 | if (!sgiter->sg) | |
20 | return false; | |
21 | ||
22 | sgiter->op_offset += len; | |
23 | sgiter->offset += len; | |
24 | if (sgiter->offset == sg_dma_len(sgiter->sg)) { | |
25 | if (sg_is_last(sgiter->sg)) | |
26 | return false; | |
27 | sgiter->offset = 0; | |
28 | sgiter->sg = sg_next(sgiter->sg); | |
29 | } | |
30 | ||
31 | if (sgiter->op_offset == iter->op_len) | |
32 | return false; | |
33 | ||
34 | return true; | |
35 | } | |
36 | ||
/*
 * Program the engine registers and launch the TDMA descriptor chain attached
 * to @dreq.
 *
 * NOTE(review): the register write order below looks deliberate (clear
 * CESA_SA_CFG first, program TDMA control/config, set the chain head, then
 * issue the command) — do not reorder without checking the CESA datasheet.
 */
void mv_cesa_dma_step(struct mv_cesa_req *dreq)
{
	struct mv_cesa_engine *engine = dreq->engine;

	/* Clear the accelerator config before reprogramming it below. */
	writel_relaxed(0, engine->regs + CESA_SA_CFG);

	/* Only signal completion through the IDMA-done interrupt. */
	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
	writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
		       CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
		       engine->regs + CESA_TDMA_CONTROL);

	writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
		       CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
		       engine->regs + CESA_SA_CFG);
	/* Point the TDMA engine at the first descriptor of the chain. */
	writel_relaxed(dreq->chain.first->cur_dma,
		       engine->regs + CESA_TDMA_NEXT_ADDR);
	/* The accelerator must be idle before we launch a new chain. */
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	/* Non-relaxed write: all setup above must be posted before kickoff. */
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
57 | ||
53da740f | 58 | void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq) |
db509a45 BB |
59 | { |
60 | struct mv_cesa_tdma_desc *tdma; | |
61 | ||
62 | for (tdma = dreq->chain.first; tdma;) { | |
63 | struct mv_cesa_tdma_desc *old_tdma = tdma; | |
b99acf79 | 64 | u32 type = tdma->flags & CESA_TDMA_TYPE_MSK; |
db509a45 | 65 | |
b99acf79 | 66 | if (type == CESA_TDMA_OP) |
db509a45 BB |
67 | dma_pool_free(cesa_dev->dma->op_pool, tdma->op, |
68 | le32_to_cpu(tdma->src)); | |
69 | ||
70 | tdma = tdma->next; | |
71 | dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma, | |
5d754137 | 72 | old_tdma->cur_dma); |
db509a45 BB |
73 | } |
74 | ||
75 | dreq->chain.first = NULL; | |
76 | dreq->chain.last = NULL; | |
77 | } | |
78 | ||
53da740f | 79 | void mv_cesa_dma_prepare(struct mv_cesa_req *dreq, |
db509a45 BB |
80 | struct mv_cesa_engine *engine) |
81 | { | |
82 | struct mv_cesa_tdma_desc *tdma; | |
83 | ||
84 | for (tdma = dreq->chain.first; tdma; tdma = tdma->next) { | |
85 | if (tdma->flags & CESA_TDMA_DST_IN_SRAM) | |
86 | tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma); | |
87 | ||
88 | if (tdma->flags & CESA_TDMA_SRC_IN_SRAM) | |
89 | tdma->src = cpu_to_le32(tdma->src + engine->sram_dma); | |
90 | ||
b99acf79 | 91 | if ((tdma->flags & CESA_TDMA_TYPE_MSK) == CESA_TDMA_OP) |
db509a45 BB |
92 | mv_cesa_adjust_op(engine, tdma->op); |
93 | } | |
94 | } | |
95 | ||
85030c51 RP |
96 | void mv_cesa_tdma_chain(struct mv_cesa_engine *engine, |
97 | struct mv_cesa_req *dreq) | |
98 | { | |
99 | if (engine->chain.first == NULL && engine->chain.last == NULL) { | |
100 | engine->chain.first = dreq->chain.first; | |
101 | engine->chain.last = dreq->chain.last; | |
102 | } else { | |
103 | struct mv_cesa_tdma_desc *last; | |
104 | ||
105 | last = engine->chain.last; | |
106 | last->next = dreq->chain.first; | |
107 | engine->chain.last = dreq->chain.last; | |
108 | ||
8759fec4 RP |
109 | /* |
110 | * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on | |
111 | * the last element of the current chain, or if the request | |
112 | * being queued needs the IV regs to be set before lauching | |
113 | * the request. | |
114 | */ | |
115 | if (!(last->flags & CESA_TDMA_BREAK_CHAIN) && | |
116 | !(dreq->chain.first->flags & CESA_TDMA_SET_STATE)) | |
85030c51 RP |
117 | last->next_dma = dreq->chain.first->cur_dma; |
118 | } | |
119 | } | |
120 | ||
/*
 * Walk the engine's descriptor chain after a TDMA interrupt, completing
 * every request whose descriptors have been fully processed.
 *
 * @status is the interrupt status for the request the hardware is currently
 * stopped on; earlier, already-finished requests are completed with
 * CESA_SA_INT_ACC0_IDMA_DONE instead.
 *
 * Returns 0 on success or the first error reported by a request's
 * ->process() callback, in which case the walk stops and the faulty
 * request is stored back in engine->req.
 */
int mv_cesa_tdma_process(struct mv_cesa_engine *engine, u32 status)
{
	struct crypto_async_request *req = NULL;
	struct mv_cesa_tdma_desc *tdma = NULL, *next = NULL;
	dma_addr_t tdma_cur;
	int res = 0;

	/* Where the hardware currently is in the chain. */
	tdma_cur = readl(engine->regs + CESA_TDMA_CUR);

	for (tdma = engine->chain.first; tdma; tdma = next) {
		/* tdma->next may be rewritten concurrently; read it locked. */
		spin_lock_bh(&engine->lock);
		next = tdma->next;
		spin_unlock_bh(&engine->lock);

		if (tdma->flags & CESA_TDMA_END_OF_REQ) {
			struct crypto_async_request *backlog = NULL;
			struct mv_cesa_ctx *ctx;
			u32 current_status;

			spin_lock_bh(&engine->lock);
			/*
			 * if req is NULL, this means we're processing the
			 * request in engine->req.
			 */
			if (!req)
				req = engine->req;
			else
				req = mv_cesa_dequeue_req_locked(engine,
								 &backlog);

			/* Re-chaining to the next request */
			engine->chain.first = tdma->next;
			tdma->next = NULL;

			/* If this is the last request, clear the chain */
			if (engine->chain.first == NULL)
				engine->chain.last = NULL;
			spin_unlock_bh(&engine->lock);

			ctx = crypto_tfm_ctx(req->tfm);
			/*
			 * Only the request the hardware stopped on gets the
			 * real interrupt status; previous ones completed.
			 */
			current_status = (tdma->cur_dma == tdma_cur) ?
					  status : CESA_SA_INT_ACC0_IDMA_DONE;
			res = ctx->ops->process(req, current_status);
			ctx->ops->complete(req);

			if (res == 0)
				mv_cesa_engine_enqueue_complete_request(engine,
									req);

			if (backlog)
				backlog->complete(backlog, -EINPROGRESS);
		}

		/* Stop on error or once we reach the hardware's position. */
		if (res || tdma->cur_dma == tdma_cur)
			break;
	}

	/* Save the last request in error to engine->req, so that the core
	 * knows which request was faulty */
	if (res) {
		spin_lock_bh(&engine->lock);
		engine->req = req;
		spin_unlock_bh(&engine->lock);
	}

	return res;
}
188 | ||
db509a45 BB |
189 | static struct mv_cesa_tdma_desc * |
190 | mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags) | |
191 | { | |
192 | struct mv_cesa_tdma_desc *new_tdma = NULL; | |
193 | dma_addr_t dma_handle; | |
194 | ||
472d640b JL |
195 | new_tdma = dma_pool_zalloc(cesa_dev->dma->tdma_desc_pool, flags, |
196 | &dma_handle); | |
db509a45 BB |
197 | if (!new_tdma) |
198 | return ERR_PTR(-ENOMEM); | |
199 | ||
5d754137 | 200 | new_tdma->cur_dma = dma_handle; |
db509a45 | 201 | if (chain->last) { |
5d754137 | 202 | chain->last->next_dma = cpu_to_le32(dma_handle); |
db509a45 BB |
203 | chain->last->next = new_tdma; |
204 | } else { | |
205 | chain->first = new_tdma; | |
206 | } | |
207 | ||
208 | chain->last = new_tdma; | |
209 | ||
210 | return new_tdma; | |
211 | } | |
212 | ||
0c99620f | 213 | int mv_cesa_dma_add_result_op(struct mv_cesa_tdma_chain *chain, dma_addr_t src, |
bac8e805 RP |
214 | u32 size, u32 flags, gfp_t gfp_flags) |
215 | { | |
0c99620f | 216 | struct mv_cesa_tdma_desc *tdma, *op_desc; |
bac8e805 RP |
217 | |
218 | tdma = mv_cesa_dma_add_desc(chain, gfp_flags); | |
219 | if (IS_ERR(tdma)) | |
220 | return PTR_ERR(tdma); | |
221 | ||
0c99620f RP |
222 | /* We re-use an existing op_desc object to retrieve the context |
223 | * and result instead of allocating a new one. | |
224 | * There is at least one object of this type in a CESA crypto | |
225 | * req, just pick the first one in the chain. | |
226 | */ | |
227 | for (op_desc = chain->first; op_desc; op_desc = op_desc->next) { | |
228 | u32 type = op_desc->flags & CESA_TDMA_TYPE_MSK; | |
229 | ||
230 | if (type == CESA_TDMA_OP) | |
231 | break; | |
232 | } | |
233 | ||
234 | if (!op_desc) | |
235 | return -EIO; | |
bac8e805 RP |
236 | |
237 | tdma->byte_cnt = cpu_to_le32(size | BIT(31)); | |
238 | tdma->src = src; | |
0c99620f RP |
239 | tdma->dst = op_desc->src; |
240 | tdma->op = op_desc->op; | |
bac8e805 RP |
241 | |
242 | flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM); | |
0c99620f | 243 | tdma->flags = flags | CESA_TDMA_RESULT; |
bac8e805 RP |
244 | return 0; |
245 | } | |
246 | ||
db509a45 BB |
247 | struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain, |
248 | const struct mv_cesa_op_ctx *op_templ, | |
249 | bool skip_ctx, | |
250 | gfp_t flags) | |
251 | { | |
252 | struct mv_cesa_tdma_desc *tdma; | |
253 | struct mv_cesa_op_ctx *op; | |
254 | dma_addr_t dma_handle; | |
6de59d45 | 255 | unsigned int size; |
db509a45 BB |
256 | |
257 | tdma = mv_cesa_dma_add_desc(chain, flags); | |
258 | if (IS_ERR(tdma)) | |
259 | return ERR_CAST(tdma); | |
260 | ||
261 | op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle); | |
262 | if (!op) | |
263 | return ERR_PTR(-ENOMEM); | |
264 | ||
265 | *op = *op_templ; | |
266 | ||
6de59d45 RK |
267 | size = skip_ctx ? sizeof(op->desc) : sizeof(*op); |
268 | ||
db509a45 BB |
269 | tdma = chain->last; |
270 | tdma->op = op; | |
6de59d45 | 271 | tdma->byte_cnt = cpu_to_le32(size | BIT(31)); |
ea1f662b | 272 | tdma->src = cpu_to_le32(dma_handle); |
36225b91 | 273 | tdma->dst = CESA_SA_CFG_SRAM_OFFSET; |
db509a45 BB |
274 | tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP; |
275 | ||
276 | return op; | |
277 | } | |
278 | ||
279 | int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain, | |
280 | dma_addr_t dst, dma_addr_t src, u32 size, | |
281 | u32 flags, gfp_t gfp_flags) | |
282 | { | |
283 | struct mv_cesa_tdma_desc *tdma; | |
284 | ||
285 | tdma = mv_cesa_dma_add_desc(chain, gfp_flags); | |
286 | if (IS_ERR(tdma)) | |
287 | return PTR_ERR(tdma); | |
288 | ||
6de59d45 | 289 | tdma->byte_cnt = cpu_to_le32(size | BIT(31)); |
db509a45 BB |
290 | tdma->src = src; |
291 | tdma->dst = dst; | |
292 | ||
293 | flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM); | |
294 | tdma->flags = flags | CESA_TDMA_DATA; | |
295 | ||
296 | return 0; | |
297 | } | |
298 | ||
35622eae | 299 | int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags) |
db509a45 BB |
300 | { |
301 | struct mv_cesa_tdma_desc *tdma; | |
302 | ||
303 | tdma = mv_cesa_dma_add_desc(chain, flags); | |
06ec1f82 | 304 | return PTR_ERR_OR_ZERO(tdma); |
db509a45 BB |
305 | } |
306 | ||
35622eae | 307 | int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags) |
db509a45 BB |
308 | { |
309 | struct mv_cesa_tdma_desc *tdma; | |
310 | ||
311 | tdma = mv_cesa_dma_add_desc(chain, flags); | |
312 | if (IS_ERR(tdma)) | |
313 | return PTR_ERR(tdma); | |
314 | ||
6de59d45 | 315 | tdma->byte_cnt = cpu_to_le32(BIT(31)); |
db509a45 BB |
316 | |
317 | return 0; | |
318 | } | |
319 | ||
320 | int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain, | |
321 | struct mv_cesa_dma_iter *dma_iter, | |
322 | struct mv_cesa_sg_dma_iter *sgiter, | |
323 | gfp_t gfp_flags) | |
324 | { | |
325 | u32 flags = sgiter->dir == DMA_TO_DEVICE ? | |
326 | CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM; | |
327 | unsigned int len; | |
328 | ||
329 | do { | |
330 | dma_addr_t dst, src; | |
331 | int ret; | |
332 | ||
333 | len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter); | |
334 | if (sgiter->dir == DMA_TO_DEVICE) { | |
335 | dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; | |
336 | src = sg_dma_address(sgiter->sg) + sgiter->offset; | |
337 | } else { | |
338 | dst = sg_dma_address(sgiter->sg) + sgiter->offset; | |
339 | src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset; | |
340 | } | |
341 | ||
342 | ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len, | |
343 | flags, gfp_flags); | |
344 | if (ret) | |
345 | return ret; | |
346 | ||
347 | } while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len)); | |
348 | ||
349 | return 0; | |
350 | } |