// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	lockdep_assert_in_softirq();
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* By now a request has been dequeued successfully; set up the hardware */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If the retry mechanism is supported,
		 * unprepare the current request and
		 * enqueue it back into the crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If the hardware was unable to execute the request, enqueue
		 * it back in front of the crypto-engine queue, to keep the
		 * order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If the retry mechanism is supported, send new requests to the engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}
}
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}
/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: indicates whether to queue the request pump to kthread_work
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);
/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
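
/*
 * Usage sketch (illustrative; my_* names are hypothetical): a driver's
 * skcipher .encrypt callback hands the request to the engine instead of
 * driving the hardware directly.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct my_tfm_ctx *ctx =
 *			crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
 *
 *		return crypto_transfer_skcipher_request_to_engine(ctx->dd->engine,
 *								  req);
 *	}
 *
 * On success this typically returns -EINPROGRESS (or -EBUSY for a
 * backlogged request); the request then completes asynchronously.
 */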
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);
/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
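
/*
 * Usage sketch (illustrative; my_* names and dd->req are hypothetical): the
 * completion side. When the hardware signals completion, the driver
 * finalizes the request it was processing. Given the
 * lockdep_assert_in_softirq() in crypto_finalize_request(), this version
 * expects finalization to run from softirq context, e.g. a tasklet.
 *
 *	static void my_done_tasklet(unsigned long data)
 *	{
 *		struct my_dev *dd = (struct my_dev *)data;
 *
 *		crypto_finalize_skcipher_request(dd->engine, dd->req,
 *						 my_hw_status(dd));
 *	}
 */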
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait for a while so the pump can drain the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached with one hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                @engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware has support for the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
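
/*
 * Usage sketch (illustrative; my_do_batch and MY_HW_QLEN are hypothetical):
 * setting up an engine for hardware that can queue several requests at once.
 * Passing retry_support as true makes the pump requeue on -ENOSPC and
 * enables the batch callback, which the pump invokes on its way out so the
 * driver can submit everything it has accumulated in one go.
 *
 *	static int my_do_batch(struct crypto_engine *engine)
 *	{
 *		return my_hw_kick(engine->priv_data);
 *	}
 *
 *	dd->engine = crypto_engine_alloc_init_and_set(dev, true, my_do_batch,
 *						      false, MY_HW_QLEN);
 *	if (!dd->engine)
 *		return -ENOMEM;
 */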
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached with one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
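
/*
 * Usage sketch (illustrative; dd is a hypothetical driver-private
 * structure): the usual engine lifecycle in a driver's probe/remove path,
 * assuming a simple engine without retry support and a realtime pump.
 *
 *	probe:
 *		dd->engine = crypto_engine_alloc_init(dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *		ret = crypto_engine_start(dd->engine);
 *		if (ret)
 *			return ret;
 *
 *	remove:
 *		crypto_engine_exit(dd->engine);
 */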
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 for success.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");