/*
 * Prepare hash walk helper.
 * Set up the base hash walk and fill walkaddr and walkbytes.
 * Returns 0 on success or negative value on error.
 */
static inline int hwh_prepare(struct ahash_request *req,
			      struct hash_walk_helper *hwh)
{
	/* fetch the first hunk of the request's scatterlist data */
	hwh->walkbytes = crypto_hash_walk_first(req, &hwh->walk);
	if (hwh->walkbytes < 0)
		return hwh->walkbytes;
	hwh->walkaddr = hwh->walk.data;
	return 0;
}
/* * Advance hash walk helper by n bytes. * Progress the walkbytes and walkaddr fields by n bytes. * If walkbytes is then 0, pull next hunk from hash walk * and update walkbytes and walkaddr. * If n is negative, unmap hash walk and return error. * Returns 0 on success or negative value on error.
*/ staticinlineint hwh_advance(struct hash_walk_helper *hwh, int n)
{ if (n < 0) return crypto_hash_walk_done(&hwh->walk, n);
/* phmac tfm context */
struct phmac_tfm_ctx {

	/* source key material used to derive a protected key from */
	u8 keybuf[PHMAC_MAX_KEYSIZE];
	unsigned int keylen;

	/* cpacf function code to use with this protected key type */
	long fc;

	/* nr of requests enqueued via crypto engine which use this tfm ctx */
	atomic_t via_engine_ctr;

	/* spinlock to atomic read/update all the following fields */
	spinlock_t pk_lock;

	/* see PK_STATE* defines above, < 0 holds convert failure rc */
	int pk_state;

	/* if state is valid, pk holds the protected key */
	struct phmac_protkey pk;
};
/* * Convert the raw key material into a protected key via PKEY api. * This function may sleep - don't call in non-sleeping context.
*/ staticinlineint convert_key(const u8 *key, unsignedint keylen, struct phmac_protkey *pk)
{ int rc, i;
pk->len = sizeof(pk->protkey);
/* * In case of a busy card retry with increasing delay * of 200, 400, 800 and 1600 ms - in total 3 s.
*/ for (rc = -EIO, i = 0; rc && i < 5; i++) { if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
rc = -EINTR; goto out;
}
rc = pkey_key2protkey(key, keylen,
pk->protkey, &pk->len, &pk->type,
PKEY_XFLAG_NOMEMALLOC);
}
out:
pr_debug("rc=%d\n", rc); return rc;
}
/* * (Re-)Convert the raw key material from the tfm ctx into a protected * key via convert_key() function. Update the pk_state, pk_type, pk_len * and the protected key in the tfm context. * Please note this function may be invoked concurrently with the very * same tfm context. The pk_lock spinlock in the context ensures an * atomic update of the pk and the pk state but does not guarantee any * order of update. So a fresh converted valid protected key may get * updated with an 'old' expired key value. As the cpacf instructions * detect this, refuse to operate with an invalid key and the calling * code triggers a (re-)conversion this does no harm. This may lead to * unnecessary additional conversion but never to invalid data on the * hash operation.
*/ staticint phmac_convert_key(struct phmac_tfm_ctx *tfm_ctx)
{ struct phmac_protkey pk; int rc;
/* * The walk is always mapped when this function is called. * Note that in case of partial processing or failure the walk * is NOT unmapped here. So a follow up task may reuse the walk * or in case of unrecoverable failure needs to unmap it.
*/
if (offset) { /* fill ctx buffer up to blocksize and process this block */
n = bs - offset;
memcpy(ctx->buf + offset, hwh->walkaddr, n);
ctx->gr0.iimp = 1; for (;;) {
k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, ctx->buf, bs); if (likely(k == bs)) break; if (unlikely(k > 0)) { /* * Can't deal with hunks smaller than blocksize. * And kmac should always return the nr of * processed bytes as 0 or a multiple of the * blocksize.
*/
rc = -EIO; goto out;
} /* protected key is invalid and needs re-conversion */ if (!maysleep) {
rc = -EKEYEXPIRED; goto out;
}
rc = phmac_convert_key(tfm_ctx); if (rc) goto out;
spin_lock_bh(&tfm_ctx->pk_lock);
memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
tfm_ctx->pk.protkey, tfm_ctx->pk.len);
spin_unlock_bh(&tfm_ctx->pk_lock);
}
ctx->buflen[0] += n; if (ctx->buflen[0] < n)
ctx->buflen[1]++;
rc = hwh_advance(hwh, n); if (unlikely(rc)) goto out;
offset = 0;
}
/* process as many blocks as possible from the walk */ while (hwh->walkbytes >= bs) {
n = (hwh->walkbytes / bs) * bs;
ctx->gr0.iimp = 1;
k = _cpacf_kmac(&ctx->gr0.reg, ctx->param, hwh->walkaddr, n); if (likely(k > 0)) {
ctx->buflen[0] += k; if (ctx->buflen[0] < k)
ctx->buflen[1]++;
rc = hwh_advance(hwh, k); if (unlikely(rc)) goto out;
} if (unlikely(k < n)) { /* protected key is invalid and needs re-conversion */ if (!maysleep) {
rc = -EKEYEXPIRED; goto out;
}
rc = phmac_convert_key(tfm_ctx); if (rc) goto out;
spin_lock_bh(&tfm_ctx->pk_lock);
memcpy(ctx->param + SHA2_KEY_OFFSET(bs),
tfm_ctx->pk.protkey, tfm_ctx->pk.len);
spin_unlock_bh(&tfm_ctx->pk_lock);
}
}
store: /* store incomplete block in context buffer */ if (hwh->walkbytes) {
memcpy(ctx->buf + offset, hwh->walkaddr, hwh->walkbytes);
ctx->buflen[0] += hwh->walkbytes; if (ctx->buflen[0] < hwh->walkbytes)
ctx->buflen[1]++;
rc = hwh_advance(hwh, hwh->walkbytes); if (unlikely(rc)) goto out;
}
/* zero request context (includes the kmac sha2 context) */
memset(req_ctx, 0, sizeof(*req_ctx));
/* * setkey() should have set a valid fc into the tfm context. * Copy this function code into the gr0 field of the kmac context.
*/ if (!tfm_ctx->fc) {
rc = -ENOKEY; goto out;
}
kmac_ctx->gr0.fc = tfm_ctx->fc;
/* * Copy the pk from tfm ctx into kmac ctx. The protected key * may be outdated but update() and final() will handle this.
*/
spin_lock_bh(&tfm_ctx->pk_lock);
memcpy(kmac_ctx->param + SHA2_KEY_OFFSET(bs),
tfm_ctx->pk.protkey, tfm_ctx->pk.len);
spin_unlock_bh(&tfm_ctx->pk_lock);
/* prep the walk in the request context */
rc = hwh_prepare(req, hwh); if (rc) goto out;
/* Try synchronous operation if no active engine usage */ if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_update(req, false); if (rc == 0) goto out;
}
/* * If sync operation failed or key expired or there are already * requests enqueued via engine, fallback to async. Mark tfm as * using engine to serialize requests.
*/ if (rc == 0 || rc == -EKEYEXPIRED) {
req_ctx->async_op = OP_UPDATE;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); if (rc != -EINPROGRESS)
atomic_dec(&tfm_ctx->via_engine_ctr);
}
if (rc != -EINPROGRESS) {
hwh_advance(hwh, rc);
memzero_explicit(kmac_ctx, sizeof(*kmac_ctx));
}
/* Try synchronous operation if no active engine usage */ if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_final(req, false); if (rc == 0) goto out;
}
/* * If sync operation failed or key expired or there are already * requests enqueued via engine, fallback to async. Mark tfm as * using engine to serialize requests.
*/ if (rc == 0 || rc == -EKEYEXPIRED) {
req_ctx->async_op = OP_FINAL;
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); if (rc != -EINPROGRESS)
atomic_dec(&tfm_ctx->via_engine_ctr);
}
/* prep the walk in the request context */
rc = hwh_prepare(req, hwh); if (rc) goto out;
req_ctx->async_op = OP_FINUP;
/* Try synchronous operations if no active engine usage */ if (!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_update(req, false); if (rc == 0)
req_ctx->async_op = OP_FINAL;
} if (!rc && req_ctx->async_op == OP_FINAL &&
!atomic_read(&tfm_ctx->via_engine_ctr)) {
rc = phmac_kmac_final(req, false); if (rc == 0) goto out;
}
/* * If sync operation failed or key expired or there are already * requests enqueued via engine, fallback to async. Mark tfm as * using engine to serialize requests.
*/ if (rc == 0 || rc == -EKEYEXPIRED) { /* req->async_op has been set to either OP_FINUP or OP_FINAL */
atomic_inc(&tfm_ctx->via_engine_ctr);
rc = crypto_transfer_hash_request_to_engine(phmac_crypto_engine, req); if (rc != -EINPROGRESS)
atomic_dec(&tfm_ctx->via_engine_ctr);
}
if (!crypto_ahash_tested(tfm)) { /* * selftest running: key is a raw hmac clear key and needs * to get embedded into a 'clear key token' in order to have * it correctly processed by the pkey module.
*/
tmpkeylen = sizeof(struct hmac_clrkey_token) + bs;
tmpkey = kzalloc(tmpkeylen, GFP_KERNEL); if (!tmpkey) {
rc = -ENOMEM; goto out;
}
rc = make_clrkey_token(key, keylen, ds, tmpkey); if (rc) goto out;
keylen = tmpkeylen;
key = tmpkey;
}
/* copy raw key into tfm context */
rc = phmac_tfm_ctx_setkey(tfm_ctx, key, keylen); if (rc) goto out;
/* convert raw key into protected key */
rc = phmac_convert_key(tfm_ctx); if (rc) goto out;
/* set function code in tfm context, check for valid pk type */ switch (ds) { case SHA224_DIGEST_SIZE: if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
rc = -EINVAL; else
tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_224; break; case SHA256_DIGEST_SIZE: if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_512)
rc = -EINVAL; else
tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_256; break; case SHA384_DIGEST_SIZE: if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
rc = -EINVAL; else
tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_384; break; case SHA512_DIGEST_SIZE: if (tfm_ctx->pk.type != PKEY_KEYTYPE_HMAC_1024)
rc = -EINVAL; else
tfm_ctx->fc = CPACF_KMAC_PHMAC_SHA_512; break; default:
tfm_ctx->fc = 0;
rc = -EINVAL;
}
/* * Three kinds of requests come in here: * 1. req->async_op == OP_UPDATE with req->nbytes > 0 * 2. req->async_op == OP_FINUP with req->nbytes > 0 * 3. req->async_op == OP_FINAL * For update and finup the hwh walk has already been prepared * by the caller. For final there is no hwh walk needed.
*/
switch (req_ctx->async_op) { case OP_UPDATE: case OP_FINUP:
rc = phmac_kmac_update(req, true); if (rc == -EKEYEXPIRED) { /* * Protected key expired, conversion is in process. * Trigger a re-schedule of this request by returning * -ENOSPC ("hardware queue full") to the crypto engine. * To avoid immediately re-invocation of this callback, * tell scheduler to voluntarily give up the CPU here.
*/
pr_debug("rescheduling request\n");
cond_resched(); return -ENOSPC;
} elseif (rc) {
hwh_advance(hwh, rc); goto out;
} if (req_ctx->async_op == OP_UPDATE) break;
req_ctx->async_op = OP_FINAL;
fallthrough; case OP_FINAL:
rc = phmac_kmac_final(req, true); if (rc == -EKEYEXPIRED) { /* * Protected key expired, conversion is in process. * Trigger a re-schedule of this request by returning * -ENOSPC ("hardware queue full") to the crypto engine. * To avoid immediately re-invocation of this callback, * tell scheduler to voluntarily give up the CPU here.
*/
pr_debug("rescheduling request\n");
cond_resched(); return -ENOSPC;
} break; default: /* unknown/unsupported/unimplemented asynch op */ return -EOPNOTSUPP;
}
/*
 * Module exit: stop and tear down the crypto engine, unregister
 * all registered ahash algorithms and remove the pseudo misc device.
 */
static void s390_phmac_exit(void)
{
	struct phmac_alg *phmac;
	int i;

	if (phmac_crypto_engine) {
		crypto_engine_stop(phmac_crypto_engine);
		crypto_engine_exit(phmac_crypto_engine);
	}

	/* unregister algs in reverse order of their registration */
	for (i = ARRAY_SIZE(phmac_algs) - 1; i >= 0; i--) {
		phmac = &phmac_algs[i];
		if (phmac->registered)
			crypto_engine_unregister_ahash(&phmac->alg);
	}

	misc_deregister(&phmac_dev);
}
staticint __init s390_phmac_init(void)
{ struct phmac_alg *phmac; int i, rc;
/* for selftest cpacf klmd subfunction is needed */ if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_256)) return -ENODEV; if (!cpacf_query_func(CPACF_KLMD, CPACF_KLMD_SHA_512)) return -ENODEV;
/* register a simple phmac pseudo misc device */
rc = misc_register(&phmac_dev); if (rc) return rc;
/* with this pseudo device alloc and start a crypto engine */
phmac_crypto_engine =
crypto_engine_alloc_init_and_set(phmac_dev.this_device, true, false, MAX_QLEN); if (!phmac_crypto_engine) {
rc = -ENOMEM; goto out_err;
}
rc = crypto_engine_start(phmac_crypto_engine); if (rc) {
crypto_engine_exit(phmac_crypto_engine);
phmac_crypto_engine = NULL; goto out_err;
}
for (i = 0; i < ARRAY_SIZE(phmac_algs); i++) {
phmac = &phmac_algs[i]; if (!cpacf_query_func(CPACF_KMAC, phmac->fc)) continue;
rc = crypto_engine_register_ahash(&phmac->alg); if (rc) goto out_err;
phmac->registered = true;
pr_debug("%s registered\n", phmac->alg.base.halg.base.cra_name);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.