/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req)
			engine->cur_req = NULL;
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	crypto_request_complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_engine_alg *alg;
	struct crypto_engine_op *op;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		goto out;
	}
start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If the hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (!engine->busy)
		engine->busy = true;

	/* Drop the lock before handing the request over to the driver */
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	alg = container_of(async_req->tfm->__crt_alg,
			   struct crypto_engine_alg, base);
	op = &alg->op;
	ret = op->do_one_request(engine, async_req);
	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, complete the request with the error code.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If the hardware was unable to execute the request, enqueue
		 * it back in front of the crypto-engine queue, to keep the
		 * order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	crypto_request_complete(async_req, ret);
retry:
	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	/* If retry mechanism is supported, send new requests to engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}
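/*
 * Illustrative sketch (not part of the original file): the pump dispatches
 * through op->do_one_request above, which a driver provides when it
 * registers an engine-backed algorithm (e.g. a struct skcipher_engine_alg
 * registered with crypto_engine_register_skcipher()). All "my_*" names
 * below are hypothetical.
 *
 *	static int my_do_one_request(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *
 *		// Program the hardware here; completion is reported later,
 *		// e.g. from the IRQ path, via
 *		// crypto_finalize_skcipher_request().
 *		return my_hw_submit(engine, req);
 *	}
 *
 *	static struct skcipher_engine_alg my_alg = {
 *		.op.do_one_request = my_do_one_request,
 *		// .base carries the usual struct skcipher_alg fields
 *	};
 */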
/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 * @need_pump: whether to queue the request pump to the kthread worker
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}
/**
 * crypto_transfer_request_to_engine - transfer one request into
 * the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}
/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);
/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);
/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);
/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);
/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be listed into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
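/*
 * Illustrative sketch (not part of the original file): a driver's skcipher
 * .encrypt handler usually does nothing but hand the request over to the
 * engine; the real work happens later in its do_one_request op. The
 * aead/akcipher/hash/kpp variants above are used the same way. "my_ctx"
 * and "my_skcipher_encrypt" are hypothetical names.
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct my_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(ctx->engine,
 *								  req);
 *	}
 *
 * The transfer typically returns -EINPROGRESS (or -EBUSY for a backlogged
 * request), which the handler propagates to the crypto API caller.
 */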
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);
/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);
/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);
/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
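/*
 * Illustrative sketch (not part of the original file): once the hardware
 * signals completion, the driver calls the matching finalize helper,
 * typically from its interrupt handler or a completion tasklet. "my_dev",
 * "my_irq_handler" and "my_hw_status_ok" are hypothetical names.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dd = data;
 *		int err = my_hw_status_ok(dd) ? 0 : -EIO;
 *
 *		crypto_finalize_skcipher_request(dd->engine, dd->req, err);
 *		return IRQ_HANDLED;
 *	}
 *
 * Finalizing completes the request towards the caller and re-queues the
 * pump so the next queued request gets dispatched.
 */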
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);
/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait
	 * for a while so the pending requests can be pumped out of the
	 * engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached with one hardware engine
 * @retry_support: whether hardware has support for retry mechanism
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->retry_support = retry_support;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);
/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached with one hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);
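/*
 * Illustrative sketch (not part of the original file): a typical probe()
 * path allocates the engine, starts it, then registers the engine-backed
 * algorithms. "dd" and "my_alg" are hypothetical names.
 *
 *	dd->engine = crypto_engine_alloc_init(dev, true);
 *	if (!dd->engine)
 *		return -ENOMEM;
 *
 *	ret = crypto_engine_start(dd->engine);
 *	if (ret) {
 *		crypto_engine_exit(dd->engine);
 *		return ret;
 *	}
 *
 *	ret = crypto_engine_register_skcipher(&my_alg);
 */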
/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 */
void crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return;

	kthread_destroy_worker(engine->kworker);
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);
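/*
 * Illustrative sketch (not part of the original file): the matching
 * remove() path unregisters the algorithms first, then tears the engine
 * down ("my_alg" and "dd" as in the probe sketch above).
 *
 *	crypto_engine_unregister_skcipher(&my_alg);
 *	crypto_engine_exit(dd->engine);
 *
 * Note that crypto_engine_exit() returns early when the engine cannot be
 * stopped, so the driver should ensure no requests are still in flight.
 */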