/*
 * Key blobs smaller/bigger than these defines are rejected
 * by the common code even before the individual setkey function
 * is called. As paes can handle different kinds of key blobs
 * and padding is also possible, the limits need to be generous.
 */
#define PAES_MIN_KEYSIZE	16
#define PAES_MAX_KEYSIZE	MAXEP11AESKEYBLOBSIZE
#define PAES_256_PROTKEY_SIZE	(32 + 32)	/* key + verification pattern */
#define PXTS_256_PROTKEY_SIZE	(32 + 32 + 32)	/* k1 + k2 + verification pattern */
/*
 * Per-tfm context for a protected-key (paes) skcipher.
 * Fixed defects: the fused keyword 'unsignedint' (compile error) and
 * the collapsed one-line layout; field names, types and order unchanged.
 */
struct s390_paes_ctx {
	/* source key material used to derive a protected key from */
	u8 keybuf[PAES_MAX_KEYSIZE];
	unsigned int keylen;

	/* cpacf function code to use with this protected key type */
	long fc;

	/* nr of requests enqueued via crypto engine which use this tfm ctx */
	atomic_t via_engine_ctr;

	/* spinlock to atomic read/update all the following fields */
	spinlock_t pk_lock;

	/* see PK_STATE* defines above, < 0 holds convert failure rc */
	int pk_state;

	/* if state is valid, pk holds the protected key */
	struct paes_protkey pk;
};
/*
 * Per-tfm context for a protected-key XTS (pxts) skcipher.
 * Holds two key halves / protected keys, hence the doubled buffers.
 * Fixed defects: fused keyword 'unsignedint' and collapsed layout;
 * field names, types and order unchanged.
 */
struct s390_pxts_ctx {
	/* source key material used to derive a protected key from */
	u8 keybuf[2 * PAES_MAX_KEYSIZE];
	unsigned int keylen;

	/* cpacf function code to use with this protected key type */
	long fc;

	/* nr of requests enqueued via crypto engine which use this tfm ctx */
	atomic_t via_engine_ctr;

	/* spinlock to atomic read/update all the following fields */
	spinlock_t pk_lock;

	/* see PK_STATE* defines above, < 0 holds convert failure rc */
	int pk_state;

	/* if state is valid, pk[] hold(s) the protected key(s) */
	struct paes_protkey pk[2];
};
/* * make_clrkey_token() - wrap the raw key ck with pkey clearkey token * information. * @returns the size of the clearkey token
*/ staticinline u32 make_clrkey_token(const u8 *ck, size_t cklen, u8 *dest)
/*
 * NOTE(review): the source text is mangled here - 'staticinline' is a
 * fused keyword and the function body is truncated: only the overlay
 * of the clear-key token struct onto dest is visible; the code that
 * fills in the token header fields and the return statement are
 * missing. Presumably the missing part populates type/version/keytype/
 * len, copies ck into token->key and returns sizeof(*token) + cklen -
 * TODO confirm against the original file.
 */
{ struct clrkey_token {
/* pkey clear-key token header layout, mapped onto the dest buffer */
u8 type;
u8 res0[3];
u8 version;
u8 res1[3];
u32 keytype;
u32 len;
/* flexible array member: the raw clear key value follows the header */
u8 key[];
} __packed *token = (struct clrkey_token *)dest;
/* * paes_ctx_setkey() - Set key value into context, maybe construct * a clear key token digestible by pkey from a clear key value.
*/ staticinlineint paes_ctx_setkey(struct s390_paes_ctx *ctx, const u8 *key, unsignedint keylen)
{ if (keylen > sizeof(ctx->keybuf)) return -EINVAL;
switch (keylen) { case 16: case 24: case 32: /* clear key value, prepare pkey clear key token in keybuf */
memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
ctx->keylen = make_clrkey_token(key, keylen, ctx->keybuf); break; default: /* other key material, let pkey handle this */
memcpy(ctx->keybuf, key, keylen);
ctx->keylen = keylen; break;
}
return 0;
}
/* * pxts_ctx_setkey() - Set key value into context, maybe construct * a clear key token digestible by pkey from a clear key value.
*/ staticinlineint pxts_ctx_setkey(struct s390_pxts_ctx *ctx, const u8 *key, unsignedint keylen)
{
size_t cklen = keylen / 2;
if (keylen > sizeof(ctx->keybuf)) return -EINVAL;
switch (keylen) { case 32: case 64: /* clear key value, prepare pkey clear key tokens in keybuf */
memset(ctx->keybuf, 0, sizeof(ctx->keybuf));
ctx->keylen = make_clrkey_token(key, cklen, ctx->keybuf);
ctx->keylen += make_clrkey_token(key + cklen, cklen,
ctx->keybuf + ctx->keylen); break; default: /* other key material, let pkey handle this */
memcpy(ctx->keybuf, key, keylen);
ctx->keylen = keylen; break;
}
return 0;
}
/* * Convert the raw key material into a protected key via PKEY api. * This function may sleep - don't call in non-sleeping context.
*/ staticinlineint convert_key(const u8 *key, unsignedint keylen, struct paes_protkey *pk)
{ int rc, i;
pk->len = sizeof(pk->protkey);
/* * In case of a busy card retry with increasing delay * of 200, 400, 800 and 1600 ms - in total 3 s.
*/ for (rc = -EIO, i = 0; rc && i < 5; i++) { if (rc == -EBUSY && msleep_interruptible((1 << i) * 100)) {
rc = -EINTR; goto out;
}
rc = pkey_key2protkey(key, keylen,
pk->protkey, &pk->len, &pk->type,
PKEY_XFLAG_NOMEMALLOC);
}
out:
pr_debug("rc=%d\n", rc); return rc;
}
/* * (Re-)Convert the raw key material from the ctx into a protected key * via convert_key() function. Update the pk_state, pk_type, pk_len * and the protected key in the tfm context. * Please note this function may be invoked concurrently with the very * same tfm context. The pk_lock spinlock in the context ensures an * atomic update of the pk and the pk state but does not guarantee any * order of update. So a fresh converted valid protected key may get * updated with an 'old' expired key value. As the cpacf instructions * detect this, refuse to operate with an invalid key and the calling * code triggers a (re-)conversion this does no harm. This may lead to * unnecessary additional conversion but never to invalid data on en- * or decrypt operations.
*/ staticint paes_convert_key(struct s390_paes_ctx *ctx)
/*
 * NOTE(review): the body of paes_convert_key() is truncated in this
 * text - only the local declarations are visible; the conversion and
 * pk_state/pk update logic is missing. 'staticint' is a fused keyword.
 * TODO: restore from the original file.
 */
{ struct paes_protkey pk; int rc;
/* * (Re-)Convert the raw xts key material from the ctx into a * protected key via convert_key() function. Update the pk_state, * pk_type, pk_len and the protected key in the tfm context. * See also comments on function paes_convert_key.
*/ staticint pxts_convert_key(struct s390_pxts_ctx *ctx)
/* NOTE(review): pxts_convert_key() body is likewise truncated here. */
{ struct paes_protkey pk0, pk1;
size_t split_keylen; int rc;
/*
 * ecb_paes_setkey() - store the raw key material in the tfm context,
 * derive a protected key from it and select the matching CPACF KM
 * function code (0 if the machine does not support it).
 * NOTE(review): mangled source - 'staticint'/'unsignedint' are fused
 * keywords and the tail of the function ('out:' label, return rc) is
 * missing from the visible text.
 */
staticint ecb_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsignedint key_len)
{ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); long fc; int rc;
/* set raw key into context */
rc = paes_ctx_setkey(ctx, in_key, key_len); if (rc) goto out;
/* convert key into protected key */
rc = paes_convert_key(ctx); if (rc) goto out;
/* Pick the correct function code based on the protected key type */ switch (ctx->pk.type) { case PKEY_KEYTYPE_AES_128:
fc = CPACF_KM_PAES_128; break; case PKEY_KEYTYPE_AES_192:
fc = CPACF_KM_PAES_192; break; case PKEY_KEYTYPE_AES_256:
fc = CPACF_KM_PAES_256; break; default:
fc = 0; break;
}
/* keep fc only if the CPACF KM facility actually offers it */
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
/*
 * NOTE(review): fragment of the ECB protected-key crypt routine
 * (presumably ecb_paes_do_crypt) - the enclosing function header,
 * local declarations and the trailing 'out:'/return are not visible
 * in this text.
 */
if (!req_ctx->param_init_done) { /* fetch and check protected key state */
spin_lock_bh(&ctx->pk_lock);
pk_state = ctx->pk_state; switch (pk_state) { case PK_STATE_NO_KEY:
rc = -ENOKEY; break; case PK_STATE_CONVERT_IN_PROGRESS:
rc = -EKEYEXPIRED; break; case PK_STATE_VALID:
/* copy the valid protected key into the cpacf parameter block */
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
req_ctx->param_init_done = true; break; default:
rc = pk_state < 0 ? pk_state : -EIO; break;
}
spin_unlock_bh(&ctx->pk_lock);
} if (rc) goto out;
/* * Note that in case of partial processing or failure the walk * is NOT unmapped here. So a follow up task may reuse the walk * or in case of unrecoverable failure needs to unmap it.
*/ while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | req_ctx->modifier, param,
walk->dst.virt.addr, walk->src.virt.addr, n); if (k)
rc = skcipher_walk_done(walk, nbytes - k); if (k < n) { if (!maysleep) {
/* k < n means CPACF rejected the (expired) protected key */
rc = -EKEYEXPIRED; goto out;
}
/* re-derive the protected key and retry with the fresh key */
rc = paes_convert_key(ctx); if (rc) goto out;
spin_lock_bh(&ctx->pk_lock);
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
spin_unlock_bh(&ctx->pk_lock);
}
}
/* * Attempt synchronous encryption first. If it fails, schedule the request * asynchronously via the crypto engine. To preserve execution order, * once a request is queued to the engine, further requests using the same * tfm will also be routed through the engine.
*/
/*
 * NOTE(review): fragment of the ECB request entry point (presumably
 * ecb_paes_crypt) followed by a fragment of the crypto-engine callback
 * (presumably ecb_paes_do_one_request); the function headers, 'out:'
 * labels and returns are not visible. 'elseif' is a fused keyword.
 */
rc = skcipher_walk_virt(walk, req, false); if (rc) goto out;
/* Try synchronous operation if no active engine usage */ if (!atomic_read(&ctx->via_engine_ctr)) {
rc = ecb_paes_do_crypt(ctx, req_ctx, false); if (rc == 0) goto out;
}
/* * If sync operation failed or key expired or there are already * requests enqueued via engine, fallback to async. Mark tfm as * using engine to serialize requests.
*/ if (rc == 0 || rc == -EKEYEXPIRED) {
atomic_inc(&ctx->via_engine_ctr);
rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); if (rc != -EINPROGRESS)
atomic_dec(&ctx->via_engine_ctr);
}
if (rc != -EINPROGRESS)
skcipher_walk_done(walk, rc);
/* --- crypto-engine callback fragment below --- */
rc = ecb_paes_do_crypt(ctx, req_ctx, true); if (rc == -EKEYEXPIRED) { /* * Protected key expired, conversion is in process. * Trigger a re-schedule of this request by returning * -ENOSPC ("hardware queue is full") to the crypto engine. * To avoid immediately re-invocation of this callback, * tell the scheduler to voluntarily give up the CPU here.
*/
cond_resched();
pr_debug("rescheduling request\n"); return -ENOSPC;
} elseif (rc) {
skcipher_walk_done(walk, rc);
}
/*
 * cbc_paes_setkey() - store the raw key material in the tfm context,
 * derive a protected key from it and select the matching CPACF KMC
 * function code (0 if the machine does not support it).
 * NOTE(review): mangled source - 'staticint'/'unsignedint' are fused
 * keywords and the tail of the function ('out:' label, return rc) is
 * missing from the visible text.
 */
staticint cbc_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsignedint key_len)
{ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); long fc; int rc;
/* set raw key into context */
rc = paes_ctx_setkey(ctx, in_key, key_len); if (rc) goto out;
/* convert raw key into protected key */
rc = paes_convert_key(ctx); if (rc) goto out;
/* Pick the correct function code based on the protected key type */ switch (ctx->pk.type) { case PKEY_KEYTYPE_AES_128:
fc = CPACF_KMC_PAES_128; break; case PKEY_KEYTYPE_AES_192:
fc = CPACF_KMC_PAES_192; break; case PKEY_KEYTYPE_AES_256:
fc = CPACF_KMC_PAES_256; break; default:
fc = 0; break;
}
/* keep fc only if the CPACF KMC facility actually offers it */
ctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
/*
 * NOTE(review): fragment of the CBC protected-key crypt routine
 * (presumably cbc_paes_do_crypt) - the enclosing function header,
 * local declarations and the trailing 'out:'/return are not visible
 * in this text.
 */
if (!req_ctx->param_init_done) { /* fetch and check protected key state */
spin_lock_bh(&ctx->pk_lock);
pk_state = ctx->pk_state; switch (pk_state) { case PK_STATE_NO_KEY:
rc = -ENOKEY; break; case PK_STATE_CONVERT_IN_PROGRESS:
rc = -EKEYEXPIRED; break; case PK_STATE_VALID:
/* copy the valid protected key into the cpacf parameter block */
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
req_ctx->param_init_done = true; break; default:
rc = pk_state < 0 ? pk_state : -EIO; break;
}
spin_unlock_bh(&ctx->pk_lock);
} if (rc) goto out;
/* load the chaining value (IV) into the cpacf parameter block */
memcpy(param->iv, walk->iv, AES_BLOCK_SIZE);
/* * Note that in case of partial processing or failure the walk * is NOT unmapped here. So a follow up task may reuse the walk * or in case of unrecoverable failure needs to unmap it.
*/ while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_kmc(ctx->fc | req_ctx->modifier, param,
walk->dst.virt.addr, walk->src.virt.addr, n); if (k) {
/* propagate the updated chaining value back to the walk IV */
memcpy(walk->iv, param->iv, AES_BLOCK_SIZE);
rc = skcipher_walk_done(walk, nbytes - k);
} if (k < n) { if (!maysleep) {
/* k < n means CPACF rejected the (expired) protected key */
rc = -EKEYEXPIRED; goto out;
}
/* re-derive the protected key and retry with the fresh key */
rc = paes_convert_key(ctx); if (rc) goto out;
spin_lock_bh(&ctx->pk_lock);
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
spin_unlock_bh(&ctx->pk_lock);
}
}
/* * Attempt synchronous encryption first. If it fails, schedule the request * asynchronously via the crypto engine. To preserve execution order, * once a request is queued to the engine, further requests using the same * tfm will also be routed through the engine.
*/
/*
 * NOTE(review): fragment of the CBC request entry point (presumably
 * cbc_paes_crypt) followed by a fragment of the crypto-engine callback
 * (presumably cbc_paes_do_one_request); the function headers, 'out:'
 * labels and returns are not visible. 'elseif' is a fused keyword.
 */
rc = skcipher_walk_virt(walk, req, false); if (rc) goto out;
/* Try synchronous operation if no active engine usage */ if (!atomic_read(&ctx->via_engine_ctr)) {
rc = cbc_paes_do_crypt(ctx, req_ctx, false); if (rc == 0) goto out;
}
/* * If sync operation failed or key expired or there are already * requests enqueued via engine, fallback to async. Mark tfm as * using engine to serialize requests.
*/ if (rc == 0 || rc == -EKEYEXPIRED) {
atomic_inc(&ctx->via_engine_ctr);
rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); if (rc != -EINPROGRESS)
atomic_dec(&ctx->via_engine_ctr);
}
if (rc != -EINPROGRESS)
skcipher_walk_done(walk, rc);
/* --- crypto-engine callback fragment below --- */
rc = cbc_paes_do_crypt(ctx, req_ctx, true); if (rc == -EKEYEXPIRED) { /* * Protected key expired, conversion is in process. * Trigger a re-schedule of this request by returning * -ENOSPC ("hardware queue is full") to the crypto engine. * To avoid immediately re-invocation of this callback, * tell the scheduler to voluntarily give up the CPU here.
*/
cond_resched();
pr_debug("rescheduling request\n"); return -ENOSPC;
} elseif (rc) {
skcipher_walk_done(walk, rc);
}
/*
 * ctr_paes_setkey() - store the raw key material in the tfm context,
 * derive a protected key from it and select the matching CPACF KMCTR
 * function code (0 if the machine does not support it).
 * NOTE(review): mangled source - 'staticint'/'unsignedint' are fused
 * keywords and the tail of the function ('out:' label, return rc) is
 * missing from the visible text.
 */
staticint ctr_paes_setkey(struct crypto_skcipher *tfm, const u8 *in_key, unsignedint key_len)
{ struct s390_paes_ctx *ctx = crypto_skcipher_ctx(tfm); long fc; int rc;
/* set raw key into context */
rc = paes_ctx_setkey(ctx, in_key, key_len); if (rc) goto out;
/* convert raw key into protected key */
rc = paes_convert_key(ctx); if (rc) goto out;
/* Pick the correct function code based on the protected key type */ switch (ctx->pk.type) { case PKEY_KEYTYPE_AES_128:
fc = CPACF_KMCTR_PAES_128; break; case PKEY_KEYTYPE_AES_192:
fc = CPACF_KMCTR_PAES_192; break; case PKEY_KEYTYPE_AES_256:
fc = CPACF_KMCTR_PAES_256; break; default:
fc = 0; break;
}
/* keep fc only if the CPACF KMCTR facility actually offers it */
ctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
/*
 * NOTE(review): fragment of the CTR protected-key crypt routine
 * (presumably ctr_paes_do_crypt) - the enclosing function header,
 * local declarations and the trailing 'out:'/return are not visible
 * in this text.
 */
if (!req_ctx->param_init_done) { /* fetch and check protected key state */
spin_lock_bh(&ctx->pk_lock);
pk_state = ctx->pk_state; switch (pk_state) { case PK_STATE_NO_KEY:
rc = -ENOKEY; break; case PK_STATE_CONVERT_IN_PROGRESS:
rc = -EKEYEXPIRED; break; case PK_STATE_VALID:
/* copy the valid protected key into the cpacf parameter block */
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
req_ctx->param_init_done = true; break; default:
rc = pk_state < 0 ? pk_state : -EIO; break;
}
spin_unlock_bh(&ctx->pk_lock);
} if (rc) goto out;
/* try to grab the shared counter-block buffer; fall back to single
 * block processing via walk->iv when the mutex is contended */
locked = mutex_trylock(&ctrblk_lock);
/* * Note that in case of partial processing or failure the walk * is NOT unmapped here. So a follow up task may reuse the walk * or in case of unrecoverable failure needs to unmap it.
*/ while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
n = AES_BLOCK_SIZE; if (nbytes >= 2 * AES_BLOCK_SIZE && locked)
n = __ctrblk_init(ctrblk, walk->iv, nbytes);
ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
k = cpacf_kmctr(ctx->fc, param, walk->dst.virt.addr,
walk->src.virt.addr, n, ctrptr); if (k) { if (ctrptr == ctrblk)
/* copy back the last used counter, then advance it */
memcpy(walk->iv, ctrptr + k - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
crypto_inc(walk->iv, AES_BLOCK_SIZE);
rc = skcipher_walk_done(walk, nbytes - k);
} if (k < n) { if (!maysleep) { if (locked)
mutex_unlock(&ctrblk_lock);
/* k < n means CPACF rejected the (expired) protected key */
rc = -EKEYEXPIRED; goto out;
}
/* re-derive the protected key and retry with the fresh key */
rc = paes_convert_key(ctx); if (rc) { if (locked)
mutex_unlock(&ctrblk_lock); goto out;
}
spin_lock_bh(&ctx->pk_lock);
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
spin_unlock_bh(&ctx->pk_lock);
}
} if (locked)
mutex_unlock(&ctrblk_lock);
/* final block may be < AES_BLOCK_SIZE, copy only nbytes */ if (nbytes) {
memset(buf, 0, AES_BLOCK_SIZE);
memcpy(buf, walk->src.virt.addr, nbytes); while (1) { if (cpacf_kmctr(ctx->fc, param, buf,
buf, AES_BLOCK_SIZE,
walk->iv) == AES_BLOCK_SIZE) break; if (!maysleep) {
rc = -EKEYEXPIRED; goto out;
}
rc = paes_convert_key(ctx); if (rc) goto out;
spin_lock_bh(&ctx->pk_lock);
memcpy(param->key, ctx->pk.protkey, sizeof(param->key));
spin_unlock_bh(&ctx->pk_lock);
}
memcpy(walk->dst.virt.addr, buf, nbytes);
crypto_inc(walk->iv, AES_BLOCK_SIZE);
rc = skcipher_walk_done(walk, 0);
}
/* * Attempt synchronous encryption first. If it fails, schedule the request * asynchronously via the crypto engine. To preserve execution order, * once a request is queued to the engine, further requests using the same * tfm will also be routed through the engine.
*/
/*
 * NOTE(review): fragment of the CTR request entry point (presumably
 * ctr_paes_crypt) followed by a fragment of the crypto-engine callback
 * (presumably ctr_paes_do_one_request); the function headers, 'out:'
 * labels and returns are not visible. 'elseif' is a fused keyword.
 */
rc = skcipher_walk_virt(walk, req, false); if (rc) goto out;
/* force a fresh copy of the protected key into the param block */
req_ctx->param_init_done = false;
/* Try synchronous operation if no active engine usage */ if (!atomic_read(&ctx->via_engine_ctr)) {
rc = ctr_paes_do_crypt(ctx, req_ctx, false); if (rc == 0) goto out;
}
/* * If sync operation failed or key expired or there are already * requests enqueued via engine, fallback to async. Mark tfm as * using engine to serialize requests.
*/ if (rc == 0 || rc == -EKEYEXPIRED) {
atomic_inc(&ctx->via_engine_ctr);
rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); if (rc != -EINPROGRESS)
atomic_dec(&ctx->via_engine_ctr);
}
if (rc != -EINPROGRESS)
skcipher_walk_done(walk, rc);
/* --- crypto-engine callback fragment below --- */
rc = ctr_paes_do_crypt(ctx, req_ctx, true); if (rc == -EKEYEXPIRED) { /* * Protected key expired, conversion is in process. * Trigger a re-schedule of this request by returning * -ENOSPC ("hardware queue is full") to the crypto engine. * To avoid immediately re-invocation of this callback, * tell the scheduler to voluntarily give up the CPU here.
*/
cond_resched();
pr_debug("rescheduling request\n"); return -ENOSPC;
} elseif (rc) {
skcipher_walk_done(walk, rc);
}
/*
 * NOTE(review): fragment of the XTS setkey routine (presumably
 * xts_paes_setkey) - the function header, local declarations and the
 * trailing 'out:'/return are not visible in this text.
 */
/* set raw key into context */
rc = pxts_ctx_setkey(ctx, in_key, in_keylen); if (rc) goto out;
/* convert raw key(s) into protected key(s) */
rc = pxts_convert_key(ctx); if (rc) goto out;
/* * xts_verify_key verifies the key length is not odd and makes * sure that the two keys are not the same. This can be done * on the two protected keys as well - but not for full xts keys.
*/ if (ctx->pk[0].type == PKEY_KEYTYPE_AES_128 ||
ctx->pk[0].type == PKEY_KEYTYPE_AES_256) {
ckey_len = (ctx->pk[0].type == PKEY_KEYTYPE_AES_128) ?
AES_KEYSIZE_128 : AES_KEYSIZE_256;
memcpy(ckey, ctx->pk[0].protkey, ckey_len);
memcpy(ckey + ckey_len, ctx->pk[1].protkey, ckey_len);
rc = xts_verify_key(tfm, ckey, 2 * ckey_len);
/* wipe the key copy; plain memset could be optimized away */
memzero_explicit(ckey, sizeof(ckey)); if (rc) goto out;
}
/* Pick the correct function code based on the protected key type */ switch (ctx->pk[0].type) { case PKEY_KEYTYPE_AES_128:
fc = CPACF_KM_PXTS_128; break; case PKEY_KEYTYPE_AES_256:
fc = CPACF_KM_PXTS_256; break; case PKEY_KEYTYPE_AES_XTS_128:
fc = CPACF_KM_PXTS_128_FULL; break; case PKEY_KEYTYPE_AES_XTS_256:
fc = CPACF_KM_PXTS_256_FULL; break; default:
fc = 0; break;
}
/* keep fc only if the CPACF KM facility actually offers it */
ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
/*
 * NOTE(review): two fragments of the XTS crypt helpers (presumably
 * xts_paes_do_crypt_fullkey and xts_paes_do_crypt_2keys) - function
 * headers, local declarations and trailing 'out:'/returns are not
 * visible in this text.
 */
/* * Note that in case of partial processing or failure the walk * is NOT unmapped here. So a follow up task may reuse the walk * or in case of unrecoverable failure needs to unmap it.
*/ while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
walk->dst.virt.addr, walk->src.virt.addr, n); if (k)
rc = skcipher_walk_done(walk, nbytes - k); if (k < n) { if (!maysleep) {
/* k < n means CPACF rejected the (expired) protected key */
rc = -EKEYEXPIRED; goto out;
}
/* re-derive the protected key(s) and retry with fresh keys */
rc = pxts_convert_key(ctx); if (rc) goto out;
spin_lock_bh(&ctx->pk_lock);
memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
memcpy(param->wkvp, ctx->pk[0].protkey + keylen, sizeof(param->wkvp));
spin_unlock_bh(&ctx->pk_lock);
}
}
/* --- second fragment (2-keys variant) below --- */
if (!req_ctx->param_init_done) {
rc = __xts_2keys_prep_param(ctx, param, walk,
keylen, offset, maysleep); if (rc) goto out;
req_ctx->param_init_done = true;
}
/* * Note that in case of partial processing or failure the walk * is NOT unmapped here. So a follow up task may reuse the walk * or in case of unrecoverable failure needs to unmap it.
*/ while ((nbytes = walk->nbytes) != 0) { /* only use complete blocks */
n = nbytes & ~(AES_BLOCK_SIZE - 1);
k = cpacf_km(ctx->fc | req_ctx->modifier, param->key + offset,
walk->dst.virt.addr, walk->src.virt.addr, n); if (k)
rc = skcipher_walk_done(walk, nbytes - k); if (k < n) { if (!maysleep) {
rc = -EKEYEXPIRED; goto out;
}
rc = pxts_convert_key(ctx); if (rc) goto out;
spin_lock_bh(&ctx->pk_lock);
memcpy(param->key + offset, ctx->pk[0].protkey, keylen);
spin_unlock_bh(&ctx->pk_lock);
}
}
/*
 * NOTE(review): fragment of the XTS crypt dispatcher (presumably
 * xts_paes_do_crypt) - checks the protected key state, then routes to
 * the 2-keys or full-key implementation based on the function code.
 * The function header and trailing 'out:'/return are not visible.
 */
/* fetch and check protected key state */
spin_lock_bh(&ctx->pk_lock);
pk_state = ctx->pk_state; switch (pk_state) { case PK_STATE_NO_KEY:
rc = -ENOKEY; break; case PK_STATE_CONVERT_IN_PROGRESS:
rc = -EKEYEXPIRED; break; case PK_STATE_VALID: break; default:
rc = pk_state < 0 ? pk_state : -EIO; break;
}
spin_unlock_bh(&ctx->pk_lock); if (rc) goto out;
/* Call the 'real' crypt function based on the xts prot key type. */ switch (ctx->fc) { case CPACF_KM_PXTS_128: case CPACF_KM_PXTS_256:
rc = xts_paes_do_crypt_2keys(ctx, req_ctx, maysleep); break; case CPACF_KM_PXTS_128_FULL: case CPACF_KM_PXTS_256_FULL:
rc = xts_paes_do_crypt_fullkey(ctx, req_ctx, maysleep); break; default:
rc = -EINVAL;
}
/* * Attempt synchronous encryption first. If it fails, schedule the request * asynchronously via the crypto engine. To preserve execution order, * once a request is queued to the engine, further requests using the same * tfm will also be routed through the engine.
*/
/*
 * NOTE(review): fragment of the XTS request entry point (presumably
 * xts_paes_crypt) followed by a fragment of the crypto-engine callback
 * (presumably xts_paes_do_one_request); the function headers, 'out:'
 * labels and returns are not visible. 'elseif' is a fused keyword.
 */
rc = skcipher_walk_virt(walk, req, false); if (rc) goto out;
/* Try synchronous operation if no active engine usage */ if (!atomic_read(&ctx->via_engine_ctr)) {
rc = xts_paes_do_crypt(ctx, req_ctx, false); if (rc == 0) goto out;
}
/* * If sync operation failed or key expired or there are already * requests enqueued via engine, fallback to async. Mark tfm as * using engine to serialize requests.
*/ if (rc == 0 || rc == -EKEYEXPIRED) {
atomic_inc(&ctx->via_engine_ctr);
rc = crypto_transfer_skcipher_request_to_engine(paes_crypto_engine, req); if (rc != -EINPROGRESS)
atomic_dec(&ctx->via_engine_ctr);
}
if (rc != -EINPROGRESS)
skcipher_walk_done(walk, rc);
/* --- crypto-engine callback fragment below --- */
rc = xts_paes_do_crypt(ctx, req_ctx, true); if (rc == -EKEYEXPIRED) { /* * Protected key expired, conversion is in process. * Trigger a re-schedule of this request by returning * -ENOSPC ("hardware queue is full") to the crypto engine. * To avoid immediately re-invocation of this callback, * tell the scheduler to voluntarily give up the CPU here.
*/
cond_resched();
pr_debug("rescheduling request\n"); return -ENOSPC;
} elseif (rc) {
skcipher_walk_done(walk, rc);
}
/*
 * Unregister an engine skcipher alg, but only if it was successfully
 * registered before (a registered alg is on a crypto alg list, so a
 * non-empty cra_list indicates prior registration).
 * Fixed defect: fused keyword 'staticinlinevoid' and collapsed layout.
 */
static inline void __crypto_unregister_skcipher(struct skcipher_engine_alg *alg)
{
	if (!list_empty(&alg->base.base.cra_list))
		crypto_engine_unregister_skcipher(alg);
}
/*
 * Module exit: stop and free the crypto engine, unregister all paes
 * algs (only those actually registered), release the shared counter
 * block page and deregister the pseudo misc device.
 * Fixed defects: fused keywords 'staticvoid'/'unsignedlong' and
 * collapsed statements; behavior unchanged.
 */
static void paes_s390_fini(void)
{
	if (paes_crypto_engine) {
		crypto_engine_stop(paes_crypto_engine);
		crypto_engine_exit(paes_crypto_engine);
	}
	__crypto_unregister_skcipher(&ctr_paes_alg);
	__crypto_unregister_skcipher(&xts_paes_alg);
	__crypto_unregister_skcipher(&cbc_paes_alg);
	__crypto_unregister_skcipher(&ecb_paes_alg);
	if (ctrblk)
		free_page((unsigned long)ctrblk);
	misc_deregister(&paes_dev);
}
/*
 * Module init: register the pseudo misc device, allocate and start the
 * crypto engine, then query the available CPACF functions.
 * NOTE(review): mangled source - 'staticint' is a fused keyword and
 * the tail of the function (alg registration, 'out_err:' label and
 * return) is missing from the visible text.
 */
staticint __init paes_s390_init(void)
{ int rc;
/* register a simple paes pseudo misc device */
rc = misc_register(&paes_dev); if (rc) return rc;
/* with this pseudo device alloc and start a crypto engine */
paes_crypto_engine =
crypto_engine_alloc_init_and_set(paes_dev.this_device, true, false, MAX_QLEN); if (!paes_crypto_engine) {
rc = -ENOMEM; goto out_err;
}
rc = crypto_engine_start(paes_crypto_engine); if (rc) {
crypto_engine_exit(paes_crypto_engine);
paes_crypto_engine = NULL; goto out_err;
}
/* Query available functions for KM, KMC and KMCTR */
cpacf_query(CPACF_KM, &km_functions);
cpacf_query(CPACF_KMC, &kmc_functions);
cpacf_query(CPACF_KMCTR, &kmctr_functions);
/*
 * NOTE(review): the following text is extraneous web-page boilerplate
 * (German) that was appended to this source file by whatever tool
 * extracted it; it is not part of the driver. Translated for reference:
 * "The information on this website has been compiled carefully to the
 * best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */