Lines Matching defs:engine
3 * Handle async block requests with the crypto hardware engine.
13 #include <crypto/engine.h>
21 * @engine: the hardware engine
25 static void crypto_finalize_request(struct crypto_engine *engine,
38 if (!engine->retry_support) {
39 spin_lock_irqsave(&engine->queue_lock, flags);
40 if (engine->cur_req == req) {
42 engine->cur_req = NULL;
44 spin_unlock_irqrestore(&engine->queue_lock, flags);
47 if (finalize_req || engine->retry_support) {
51 ret = enginectx->op.unprepare_request(engine, req);
53 dev_err(engine->dev, "failed to unprepare request\n");
58 kthread_queue_work(engine->kworker, &engine->pump_requests);
62 * crypto_pump_requests - dequeue one request from the engine queue to process
63 * @engine: the hardware engine
66 * This function checks if there is any request in the engine queue that
70 static void crypto_pump_requests(struct crypto_engine *engine,
79 spin_lock_irqsave(&engine->queue_lock, flags);
82 if (!engine->retry_support && engine->cur_req)
86 if (engine->idling) {
87 kthread_queue_work(engine->kworker, &engine->pump_requests);
91 /* Check if the engine queue is idle */
92 if (!crypto_queue_len(&engine->queue) || !engine->running) {
93 if (!engine->busy)
98 kthread_queue_work(engine->kworker,
99 &engine->pump_requests);
103 engine->busy = false;
104 engine->idling = true;
105 spin_unlock_irqrestore(&engine->queue_lock, flags);
107 if (engine->unprepare_crypt_hardware &&
108 engine->unprepare_crypt_hardware(engine))
109 dev_err(engine->dev, "failed to unprepare crypt hardware\n");
111 spin_lock_irqsave(&engine->queue_lock, flags);
112 engine->idling = false;
117 /* Get the first request from the engine queue to handle */
118 backlog = crypto_get_backlog(&engine->queue);
119 async_req = crypto_dequeue_request(&engine->queue);
128 if (!engine->retry_support)
129 engine->cur_req = async_req;
134 if (engine->busy)
137 engine->busy = true;
139 spin_unlock_irqrestore(&engine->queue_lock, flags);
142 if (!was_busy && engine->prepare_crypt_hardware) {
143 ret = engine->prepare_crypt_hardware(engine);
145 dev_err(engine->dev, "failed to prepare crypt hardware\n");
153 ret = enginectx->op.prepare_request(engine, async_req);
155 dev_err(engine->dev, "failed to prepare request: %d\n",
161 dev_err(engine->dev, "failed to do request\n");
166 ret = enginectx->op.do_one_request(engine, async_req);
175 if (!engine->retry_support ||
177 dev_err(engine->dev,
185 * enqueue it back into the crypto-engine queue.
188 ret = enginectx->op.unprepare_request(engine,
191 dev_err(engine->dev,
194 spin_lock_irqsave(&engine->queue_lock, flags);
197 * back at the front of the crypto-engine queue, to keep the order
200 crypto_enqueue_request_head(&engine->queue, async_req);
202 kthread_queue_work(engine->kworker, &engine->pump_requests);
210 ret = enginectx->op.unprepare_request(engine, async_req);
212 dev_err(engine->dev, "failed to unprepare request\n");
219 /* If the retry mechanism is supported, send new requests to the engine */
220 if (engine->retry_support) {
221 spin_lock_irqsave(&engine->queue_lock, flags);
227 spin_unlock_irqrestore(&engine->queue_lock, flags);
233 if (engine->do_batch_requests) {
234 ret = engine->do_batch_requests(engine);
236 dev_err(engine->dev, "failed to do batch requests: %d\n",
245 struct crypto_engine *engine =
248 crypto_pump_requests(engine, true);
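The enginectx->op hooks invoked above live at the start of the driver's per-transform context, which is how crypto_pump_requests() reaches them through crypto_tfm_ctx(). A minimal driver-side sketch, assuming the struct crypto_engine_ctx / struct crypto_engine_op layout provided by <crypto/engine.h> in this kernel generation; my_skcipher_ctx, my_do_one_request and my_init_tfm are hypothetical names, not part of the listing:

#include <linux/kernel.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>

struct my_skcipher_ctx {
	struct crypto_engine_ctx enginectx;	/* must stay first: found via crypto_tfm_ctx() */
	/* key material / device pointer would follow */
};

static int my_do_one_request(struct crypto_engine *engine, void *areq)
{
	struct skcipher_request *req =
		container_of(areq, struct skcipher_request, base);

	/* program the hardware for @req here; returning 0 means the result
	 * will be reported later via crypto_finalize_skcipher_request() */
	pr_debug("queued %u bytes to the hardware\n", req->cryptlen);
	return 0;
}

static int my_init_tfm(struct crypto_skcipher *tfm)
{
	struct my_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->enginectx.op.prepare_request = NULL;	/* optional hooks */
	ctx->enginectx.op.unprepare_request = NULL;
	ctx->enginectx.op.do_one_request = my_do_one_request;
	return 0;
}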
252 * crypto_transfer_request - transfer the new request into the engine queue
253 * @engine: the hardware engine
254 * @req: the request that needs to be listed into the engine queue
256 static int crypto_transfer_request(struct crypto_engine *engine,
263 spin_lock_irqsave(&engine->queue_lock, flags);
265 if (!engine->running) {
266 spin_unlock_irqrestore(&engine->queue_lock, flags);
270 ret = crypto_enqueue_request(&engine->queue, req);
272 if (!engine->busy && need_pump)
273 kthread_queue_work(engine->kworker, &engine->pump_requests);
275 spin_unlock_irqrestore(&engine->queue_lock, flags);
281 * into the engine queue
282 * @engine: the hardware engine
283 * @req: the request that needs to be listed into the engine queue
285 static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
288 return crypto_transfer_request(engine, req, true);
293 * to be listed into the engine queue
294 * @engine: the hardware engine
295 * @req: the request that needs to be listed into the engine queue
297 int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
300 return crypto_transfer_request_to_engine(engine, &req->base);
306 * to be listed into the engine queue
307 * @engine: the hardware engine
308 * @req: the request that needs to be listed into the engine queue
310 int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
313 return crypto_transfer_request_to_engine(engine, &req->base);
319 * to be listed into the engine queue
320 * @engine: the hardware engine
321 * @req: the request that needs to be listed into the engine queue
323 int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
326 return crypto_transfer_request_to_engine(engine, &req->base);
332 * to be listed into the engine queue
333 * @engine: the hardware engine
334 * @req: the request that needs to be listed into the engine queue
336 int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
339 return crypto_transfer_request_to_engine(engine, &req->base);
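On the driver side, these transfer helpers are typically the entire body of the algorithm's base operation: the request is queued onto the engine and the enqueue status is returned to the caller as-is. A minimal sketch, assuming a hypothetical single-instance driver state my_dev (not part of the listing) that holds the engine pointer:

#include <crypto/engine.h>
#include <crypto/skcipher.h>

struct my_dev {
	struct crypto_engine *engine;
	struct skcipher_request *req;	/* request currently owned by the hardware */
};

static struct my_dev *my_dev;		/* set up at probe time */

static int my_skcipher_encrypt(struct skcipher_request *req)
{
	/* no work happens in the caller's context; the engine kworker will
	 * call the driver's do_one_request() hook for this request later */
	return crypto_transfer_skcipher_request_to_engine(my_dev->engine, req);
}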
346 * @engine: the hardware engine
350 void crypto_finalize_aead_request(struct crypto_engine *engine,
353 return crypto_finalize_request(engine, &req->base, err);
360 * @engine: the hardware engine
364 void crypto_finalize_akcipher_request(struct crypto_engine *engine,
367 return crypto_finalize_request(engine, &req->base, err);
374 * @engine: the hardware engine
378 void crypto_finalize_hash_request(struct crypto_engine *engine,
381 return crypto_finalize_request(engine, &req->base, err);
388 * @engine: the hardware engine
392 void crypto_finalize_skcipher_request(struct crypto_engine *engine,
395 return crypto_finalize_request(engine, &req->base, err);
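Continuing the hypothetical my_dev sketch above, the finalize helpers are what the driver calls from its completion path (interrupt handler, tasklet or DMA callback) once the hardware has produced a result; they run the request's completion callback and let the engine pump the next queued entry:

#include <crypto/engine.h>
#include <crypto/skcipher.h>

static void my_xfer_complete(struct my_dev *dd, int err)
{
	struct skcipher_request *req = dd->req;

	dd->req = NULL;
	/* completes @req with @err and kicks the engine's pump work */
	crypto_finalize_skcipher_request(dd->engine, req, err);
}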
400 * crypto_engine_start - start the hardware engine
401 * @engine: the hardware engine that needs to be started
405 int crypto_engine_start(struct crypto_engine *engine)
409 spin_lock_irqsave(&engine->queue_lock, flags);
411 if (engine->running || engine->busy) {
412 spin_unlock_irqrestore(&engine->queue_lock, flags);
416 engine->running = true;
417 spin_unlock_irqrestore(&engine->queue_lock, flags);
419 kthread_queue_work(engine->kworker, &engine->pump_requests);
426 * crypto_engine_stop - stop the hardware engine
427 * @engine: the hardware engine that needs to be stopped
431 int crypto_engine_stop(struct crypto_engine *engine)
437 spin_lock_irqsave(&engine->queue_lock, flags);
440 * If the engine queue is not empty or the engine is in a busy state,
441 * we need to wait a while for the requests in the engine queue to be pumped.
443 while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
444 spin_unlock_irqrestore(&engine->queue_lock, flags);
446 spin_lock_irqsave(&engine->queue_lock, flags);
449 if (crypto_queue_len(&engine->queue) || engine->busy)
452 engine->running = false;
454 spin_unlock_irqrestore(&engine->queue_lock, flags);
457 dev_warn(engine->dev, "could not stop engine\n");
464 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
466 * crypto-engine queue.
467 * @dev: the device attached to the hardware engine
472 * callback(struct crypto_engine *engine)
474 * @engine: the crypto engine structure.
476 * @qlen: maximum size of the crypto-engine queue
479 * Return: the crypto engine structure on success, else NULL.
483 int (*cbk_do_batch)(struct crypto_engine *engine),
486 struct crypto_engine *engine;
491 engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
492 if (!engine)
495 engine->dev = dev;
496 engine->rt = rt;
497 engine->running = false;
498 engine->busy = false;
499 engine->idling = false;
500 engine->retry_support = retry_support;
501 engine->priv_data = dev;
506 engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;
508 snprintf(engine->name, sizeof(engine->name),
509 "%s-engine", dev_name(dev));
511 crypto_init_queue(&engine->queue, qlen);
512 spin_lock_init(&engine->queue_lock);
514 engine->kworker = kthread_create_worker(0, "%s", engine->name);
515 if (IS_ERR(engine->kworker)) {
519 kthread_init_work(&engine->pump_requests, crypto_pump_work);
521 if (engine->rt) {
523 sched_set_fifo(engine->kworker->task);
526 return engine;
531 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
533 * @dev: the device attached to the hardware engine
537 * Return: the crypto engine structure on success, else NULL.
547 * crypto_engine_exit - free the resources of the hardware engine on exit
548 * @engine: the hardware engine that needs to be freed
552 int crypto_engine_exit(struct crypto_engine *engine)
556 ret = crypto_engine_stop(engine);
560 kthread_destroy_worker(engine->kworker);
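Putting the lifecycle together, a driver only needs the allocate/start helpers at probe time and crypto_engine_exit() on the way out. A minimal sketch for a hypothetical platform driver (my_probe, my_remove and the my_dev state are illustrative, not part of the listing):

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <crypto/engine.h>

struct my_dev {
	struct crypto_engine *engine;	/* only the engine matters for probe/remove */
};

static int my_probe(struct platform_device *pdev)
{
	struct my_dev *dd;
	int ret;

	dd = devm_kzalloc(&pdev->dev, sizeof(*dd), GFP_KERNEL);
	if (!dd)
		return -ENOMEM;

	/* default queue length, no realtime kworker */
	dd->engine = crypto_engine_alloc_init(&pdev->dev, false);
	if (!dd->engine)
		return -ENOMEM;

	ret = crypto_engine_start(dd->engine);
	if (ret) {
		crypto_engine_exit(dd->engine);
		return ret;
	}

	platform_set_drvdata(pdev, dd);
	return 0;
}

static int my_remove(struct platform_device *pdev)
{
	struct my_dev *dd = platform_get_drvdata(pdev);

	/* stops the engine and destroys its kworker */
	crypto_engine_exit(dd->engine);
	return 0;
}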
567 MODULE_DESCRIPTION("Crypto hardware engine framework");