crypto: engine - support for parallel requests based on retry mechanism

Add support for executing multiple requests, in parallel, in the
crypto engine, based on a retry mechanism: if the hardware is unable
to execute a backlog request, the request is enqueued back at the head
of the crypto-engine queue, to keep the order of requests.

A new variable, retry_support, is added (to keep crypto-engine
backward compatible); it tracks whether the hardware supports the
retry mechanism and, therefore, whether it can run multiple requests.

If do_one_request() returns:
>= 0: the hardware executed the request successfully;
< 0: this is the old error path. If the hardware supports the retry
mechanism, the request is put back at the head of the crypto-engine
queue. For backward compatibility, if retry support is not available,
the crypto-engine works as before.
If the hardware queue is full (-ENOSPC), the request is requeued
regardless of the MAY_BACKLOG flag.
If the hardware returns any other error code (such as -EIO, -EINVAL,
-ENOMEM, etc.), only MAY_BACKLOG requests are enqueued back into the
crypto-engine queue, since the others can be dropped. This decision is
condensed in the sketch below.
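
The requeue-or-drop decision reduces to the following check (a sketch
condensed from the crypto_pump_requests() hunk below, evaluated after
do_one_request() returns ret < 0):

	if (!engine->retry_support ||
	    (ret != -ENOSPC &&
	     !(async_req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
		/* old error path: unprepare the request and
		 * complete it with the error code
		 */
	} else {
		/* requeue at the head of the crypto-engine queue,
		 * preserving request order, and pump again later
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);
	}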

The new crypto_engine_alloc_init_and_set() function initializes the
crypto-engine, sets the maximum size of the crypto-engine software
queue (no longer hardcoded) and sets the retry_support variable, which
defaults to false.
In crypto_pump_requests(), as long as do_one_request() returns >= 0, a
new request is sent to the hardware, until the hardware runs out of
space and do_one_request() returns < 0.
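
For example, a driver whose hardware can accept several in-flight
requests might allocate its engine as follows (a sketch: the device
pointer and the queue length of 16 are placeholder values):

	/* retry support on, non-realtime kworker, 16-entry SW queue */
	struct crypto_engine *engine;

	engine = crypto_engine_alloc_init_and_set(dev, true, false, 16);
	if (!engine)
		return -ENOMEM;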

By default, retry_support is false and the crypto-engine works as
before: it sends requests to the hardware one by one from
crypto_pump_requests(), completes each of them in
crypto_finalize_request(), and so on.

To support multiple requests, each driver must set retry_support to
true, and if do_one_request() returns an error the request must not be
freed, since it will be enqueued back into the crypto-engine queue, as
sketched below.
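
A minimal driver-side sketch (hypothetical names; my_hw_enqueue()
stands in for the driver's real submission routine):

	static int my_do_one_request(struct crypto_engine *engine,
				     void *areq)
	{
		/* hand the (already prepared) request to the hardware;
		 * returns -ENOSPC when the hardware queue is full
		 */
		int ret = my_hw_enqueue(areq);

		/* On error, just return it. Do NOT free or complete
		 * the request here: the crypto-engine will either
		 * requeue it (retry path) or complete it with the
		 * error code.
		 */
		return ret;
	}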

Once all drivers that currently use crypto-engine have been updated
for the retry mechanism, the retry_support variable can be removed.

Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Author:    Iuliana Prodan <iuliana.prodan@nxp.com>
Date:      2020-04-28 18:49:04 +03:00
Committer: Herbert Xu
commit     6a89f492f8
parent     ec6e2bf33b
2 changed files, 126 insertions(+), 34 deletions(-)

--- a/crypto/crypto_engine.c
+++ b/crypto/crypto_engine.c

@@ -22,32 +22,36 @@
  * @err: error number
  */
 static void crypto_finalize_request(struct crypto_engine *engine,
 				    struct crypto_async_request *req, int err)
 {
 	unsigned long flags;
-	bool finalize_cur_req = false;
+	bool finalize_req = false;
 	int ret;
 	struct crypto_engine_ctx *enginectx;
 
-	spin_lock_irqsave(&engine->queue_lock, flags);
-	if (engine->cur_req == req)
-		finalize_cur_req = true;
-	spin_unlock_irqrestore(&engine->queue_lock, flags);
+	/*
+	 * If hardware cannot enqueue more requests
+	 * and retry mechanism is not supported
+	 * make sure we are completing the current request
+	 */
+	if (!engine->retry_support) {
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		if (engine->cur_req == req) {
+			finalize_req = true;
+			engine->cur_req = NULL;
+		}
+		spin_unlock_irqrestore(&engine->queue_lock, flags);
+	}
 
-	if (finalize_cur_req) {
+	if (finalize_req || engine->retry_support) {
 		enginectx = crypto_tfm_ctx(req->tfm);
-		if (engine->cur_req_prepared &&
+		if (enginectx->op.prepare_request &&
 		    enginectx->op.unprepare_request) {
 			ret = enginectx->op.unprepare_request(engine, req);
 			if (ret)
 				dev_err(engine->dev, "failed to unprepare request\n");
 		}
-		spin_lock_irqsave(&engine->queue_lock, flags);
-		engine->cur_req = NULL;
-		engine->cur_req_prepared = false;
-		spin_unlock_irqrestore(&engine->queue_lock, flags);
 	}
 
 	req->complete(req, err);
 
 	kthread_queue_work(engine->kworker, &engine->pump_requests);
@@ -74,7 +78,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 	spin_lock_irqsave(&engine->queue_lock, flags);
 
 	/* Make sure we are not already running a request */
-	if (engine->cur_req)
+	if (!engine->retry_support && engine->cur_req)
 		goto out;
 
 	/* If another context is idling then defer */
@@ -108,13 +112,21 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		goto out;
 	}
 
+start_request:
 	/* Get the fist request from the engine queue to handle */
 	backlog = crypto_get_backlog(&engine->queue);
 	async_req = crypto_dequeue_request(&engine->queue);
 	if (!async_req)
 		goto out;
 
-	engine->cur_req = async_req;
+	/*
+	 * If hardware doesn't support the retry mechanism,
+	 * keep track of the request we are processing now.
+	 * We'll need it on completion (crypto_finalize_request).
+	 */
+	if (!engine->retry_support)
+		engine->cur_req = async_req;
+
 	if (backlog)
 		backlog->complete(backlog, -EINPROGRESS);
@@ -130,7 +142,7 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		ret = engine->prepare_crypt_hardware(engine);
 		if (ret) {
 			dev_err(engine->dev, "failed to prepare crypt hardware\n");
-			goto req_err;
+			goto req_err_2;
 		}
 	}
@@ -141,28 +153,81 @@ static void crypto_pump_requests(struct crypto_engine *engine,
 		if (ret) {
 			dev_err(engine->dev, "failed to prepare request: %d\n",
 				ret);
-			goto req_err;
+			goto req_err_2;
 		}
-		engine->cur_req_prepared = true;
 	}
 	if (!enginectx->op.do_one_request) {
 		dev_err(engine->dev, "failed to do request\n");
 		ret = -EINVAL;
-		goto req_err;
+		goto req_err_1;
 	}
-	ret = enginectx->op.do_one_request(engine, async_req);
-	if (ret) {
-		dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
-		goto req_err;
-	}
-	return;
 
-req_err:
-	crypto_finalize_request(engine, async_req, ret);
+	ret = enginectx->op.do_one_request(engine, async_req);
+
+	/* Request unsuccessfully executed by hardware */
+	if (ret < 0) {
+		/*
+		 * If hardware queue is full (-ENOSPC), requeue request
+		 * regardless of backlog flag.
+		 * If hardware throws any other error code,
+		 * requeue only backlog requests.
+		 * Otherwise, unprepare and complete the request.
+		 */
+		if (!engine->retry_support ||
+		    ((ret != -ENOSPC) &&
+		     !(async_req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) {
+			dev_err(engine->dev,
+				"Failed to do one request from queue: %d\n",
+				ret);
+			goto req_err_1;
+		}
+		/*
+		 * If retry mechanism is supported,
+		 * unprepare current request and
+		 * enqueue it back into crypto-engine queue.
+		 */
+		if (enginectx->op.unprepare_request) {
+			ret = enginectx->op.unprepare_request(engine,
+							      async_req);
+			if (ret)
+				dev_err(engine->dev,
+					"failed to unprepare request\n");
+		}
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		/*
+		 * If hardware was unable to execute request, enqueue it
+		 * back in front of crypto-engine queue, to keep the order
+		 * of requests.
+		 */
+		crypto_enqueue_request_head(&engine->queue, async_req);
+
+		kthread_queue_work(engine->kworker, &engine->pump_requests);
+		goto out;
+	}
+
+	goto retry;
+
+req_err_1:
+	if (enginectx->op.unprepare_request) {
+		ret = enginectx->op.unprepare_request(engine, async_req);
+
+		if (ret)
+			dev_err(engine->dev, "failed to unprepare request\n");
+	}
+
+req_err_2:
+	async_req->complete(async_req, ret);
+
+retry:
+	/* If retry mechanism is supported, send new requests to engine */
+	if (engine->retry_support) {
+		spin_lock_irqsave(&engine->queue_lock, flags);
+		goto start_request;
+	}
 	return;
 
 out:
 	spin_unlock_irqrestore(&engine->queue_lock, flags);
+	return;
 }
 
 static void crypto_pump_work(struct kthread_work *work)
@@ -386,15 +451,20 @@ int crypto_engine_stop(struct crypto_engine *engine)
 EXPORT_SYMBOL_GPL(crypto_engine_stop);
 
 /**
- * crypto_engine_alloc_init - allocate crypto hardware engine structure and
- * initialize it.
+ * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
+ * and initialize it by setting the maximum number of entries in the software
+ * crypto-engine queue.
  * @dev: the device attached with one hardware engine
+ * @retry_support: whether hardware has support for retry mechanism
  * @rt: whether this queue is set to run as a realtime task
+ * @qlen: maximum size of the crypto-engine queue
  *
  * This must be called from context that can sleep.
  * Return: the crypto engine structure on success, else NULL.
  */
-struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
+						       bool retry_support,
+						       bool rt, int qlen)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
 	struct crypto_engine *engine;
@@ -411,12 +481,12 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 	engine->running = false;
 	engine->busy = false;
 	engine->idling = false;
-	engine->cur_req_prepared = false;
+	engine->retry_support = retry_support;
 	engine->priv_data = dev;
 	snprintf(engine->name, sizeof(engine->name),
 		 "%s-engine", dev_name(dev));
 
-	crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
+	crypto_init_queue(&engine->queue, qlen);
 	spin_lock_init(&engine->queue_lock);
 
 	engine->kworker = kthread_create_worker(0, "%s", engine->name);
@@ -433,6 +503,22 @@ struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
 
 	return engine;
 }
+EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
+
+/**
+ * crypto_engine_alloc_init - allocate crypto hardware engine structure and
+ * initialize it.
+ * @dev: the device attached with one hardware engine
+ * @rt: whether this queue is set to run as a realtime task
+ *
+ * This must be called from context that can sleep.
+ * Return: the crypto engine structure on success, else NULL.
+ */
+struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
+{
+	return crypto_engine_alloc_init_and_set(dev, false, rt,
+						CRYPTO_ENGINE_MAX_QLEN);
+}
 EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
 
 /**

--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h

@@ -24,7 +24,9 @@
  * @idling: the engine is entering idle state
  * @busy: request pump is busy
  * @running: the engine is on working
- * @cur_req_prepared: current request is prepared
+ * @retry_support: indication that the hardware allows re-execution
+ * of a failed backlog request
+ * crypto-engine, in head position to keep order
  * @list: link with the global crypto engine list
  * @queue_lock: spinlock to syncronise access to request queue
  * @queue: the crypto queue of the engine
@@ -45,7 +47,8 @@ struct crypto_engine {
 	bool			idling;
 	bool			busy;
 	bool			running;
-	bool			cur_req_prepared;
+
+	bool			retry_support;
 
 	struct list_head	list;
 	spinlock_t		queue_lock;
@@ -102,6 +105,9 @@ void crypto_finalize_skcipher_request(struct crypto_engine *engine,
 int crypto_engine_start(struct crypto_engine *engine);
 int crypto_engine_stop(struct crypto_engine *engine);
 struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
+struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
+						       bool retry_support,
+						       bool rt, int qlen);
 int crypto_engine_exit(struct crypto_engine *engine);
 
 #endif /* _CRYPTO_ENGINE_H */