/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_cur_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);
	if (engine->cur_req == req)
		finalize_cur_req = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (finalize_cur_req) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (engine->cur_req_prepared &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->cur_req = NULL;
		engine->cur_req_prepared = false;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	engine->cur_req = async_req;
	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* By this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err;
		}
		engine->cur_req_prepared = true;
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err;
	}
	ret = enginectx->op.do_one_request(engine, async_req);
	if (ret) {
		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
		goto req_err;
	}
	return;

req_err:
	crypto_finalize_request(engine, async_req, ret);
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

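/*
 * Example: a minimal sketch (not part of this file) of how a driver wires
 * up the per-transform engine ops that crypto_pump_requests() invokes
 * above. All names prefixed with my_ are hypothetical placeholders, not
 * part of this API. Note that struct crypto_engine_ctx must be the first
 * member of the transform context, because crypto_tfm_ctx() is cast to it
 * by this framework.
 *
 *	struct my_tfm_ctx {
 *		struct crypto_engine_ctx enginectx;	// must be first
 *		struct my_dev *dd;
 *	};
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *
 *		// Kick the hardware; the request completes later from the
 *		// driver's interrupt path via crypto_finalize_*_request().
 *		return my_hw_start(req);
 *	}
 *
 *	static int my_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = NULL;
 *		ctx->enginectx.op.unprepare_request = NULL;
 *		ctx->enginectx.op.do_one_request = my_do_one_request;
 *		return 0;
 *	}
 */
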
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: if true, kick the request pump after enqueueing
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_ablkcipher_request_to_engine - transfer one
 * ablkcipher_request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * TODO: Remove this function when skcipher conversion is finished
 */
int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
						 struct ablkcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_ablkcipher_request_to_engine);

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

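/*
 * Example: a minimal sketch (not part of this file) of an skcipher
 * .encrypt entry point that hands the request off to the engine instead
 * of driving the hardware directly. my_tfm_ctx and its dd->engine pointer
 * are hypothetical; a real driver looks up its own engine instance.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(ctx->dd->engine,
 *								  req);
 *	}
 */
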
/**
 * crypto_finalize_ablkcipher_request - finalize one ablkcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 * TODO: Remove this function when skcipher conversion is finished
 */
void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
					struct ablkcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_ablkcipher_request);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

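/*
 * Example: a minimal sketch (not part of this file) of a completion path,
 * e.g. a threaded interrupt handler, reporting the hardware result back
 * through the engine. Finalizing clears engine->cur_req and re-kicks the
 * request pump so the next queued request is processed. The my_ names and
 * the dd->req bookkeeping are hypothetical.
 *
 *	static irqreturn_t my_irq_thread(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *		int err = my_hw_read_status(dd);
 *
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *		return IRQ_HANDLED;
 *	}
 */
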
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code on failure.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code on failure.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait a while for the pending requests in the engine queue to be
	 * processed.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->cur_req_prepared = false;
	engine->priv_data = dev;
	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

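/*
 * Example: a minimal sketch (not part of this file) of probe-time setup
 * in a driver. The engine structure itself is devm-allocated against
 * @dev, so only the kworker needs explicit teardown via
 * crypto_engine_exit() on the remove path. my_probe and my_dev are
 * hypothetical.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *
 *		return crypto_engine_start(dd->engine);
 *	}
 */
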
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

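/*
 * Example: the matching remove-time teardown for the probe sketch above
 * (my_remove and my_dev are hypothetical). crypto_engine_exit() can fail
 * with -EBUSY if requests are still pending, so a real driver may want to
 * propagate that result.
 *
 *	static int my_remove(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		return crypto_engine_exit(dd->engine);
 *	}
 */
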
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");