/* The PDMA is a DMA-engine tightly coupled with a ciphering engine.
 * It operates on a descriptor array with up to 64 descriptor entries.
 * The arrays must be 64 byte aligned in memory.
 *
 * The ciphering unit has no registers and is completely controlled by
 * a 4-byte metadata that is inserted at the beginning of each dma packet.
 *
 * A dma packet is a sequence of descriptors terminated by setting the .eop
 * field in the final descriptor of the packet.
 *
 * Multiple packets are used for providing context data, key data and
 * the plain/ciphertext.
 *
 *   PDMA Descriptors (Array)
 *  +------+------+------+~~+-------+------+----
 *  |  0   |  1   |  2   |~~| 11 EOP| 12   |  ....
 *  +--+---+--+---+----+-+~~+-------+----+-+----
 *     |      |        |       |         |
 *     |      |        |       |         |
 *   __|__  +-------++-------++-------+ +----+
 *  | MD  | |Payload||Payload||Payload| | MD |
 *  +-----+ +-------++-------++-------+ +----+
 */
/* Bounce buffer for destinations that are not cache-line aligned; data is
 * staged in @buf and later copied back into the destination scatterlist
 * (see artpec6_crypto_copy_bounce_buffers()). */
struct artpec6_crypto_bounce_buffer {
	struct list_head list;		/* entry in the request's bounce list */
	size_t length;			/* number of bytes staged in @buf */
	struct scatterlist *sg;		/* destination sg for the copy-back */
	size_t offset;			/* byte offset into @sg for the data */
	/* buf is aligned to ARTPEC_CACHE_LINE_MAX and
	 * holds up to ARTPEC_CACHE_LINE_MAX bytes data.
	 */
	void *buf;
};
/* NOTE(review): fragment — the head of the enclosing function (which
 * computes 'ind', 'statd', 'outd' and 'base') is not visible in this
 * chunk; code left byte-for-byte as found.
 *
 * Kick off the PDMA engine: push the IN descriptor and status queues and
 * start the IN channel using the register layout of the detected SoC
 * variant, then push/start the (variant-independent) OUT channel.
 */
if (variant == ARTPEC6_CRYPTO) {
/* ARTPEC-6 register offsets */
writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH);
writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH);
writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD);
} else {
/* ARTPEC-7 register offsets */
writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH);
writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH);
writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD);
}
/* OUT channel registers are common between the two variants */
writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH);
writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD);
/** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a
 *                                        physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @eop:  True if this is the last buffer in the packet
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common,
				    dma_addr_t addr, size_t len, bool eop)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->out_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free OUT DMA descriptors available!\n");
		return -ENOSPC;
	}

	d = &dma->out[dma->out_cnt++];
	memset(d, 0, sizeof(*d));

	/* NOTE(review): tail reconstructed — the source chunk was truncated
	 * right after the memset. Fill in a regular (non-short) data
	 * descriptor pointing at the mapped buffer. */
	d->ctrl.short_descr = 0;
	d->ctrl.eop = eop;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}
/** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor * * @dst: The virtual address of the data * @len: The length of the data, must be between 1 to 7 bytes * @eop: True if this is the last buffer in the packet * * @return 0 on success * -ENOSPC if no more descriptors are available * -EINVAL if the data length exceeds 7 bytes
*/ staticint
artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common, void *dst, unsignedint len, bool eop)
{ struct artpec6_crypto_dma_descriptors *dma = common->dma; struct pdma_descr *d;
if (dma->out_cnt >= PDMA_DESCR_COUNT ||
fault_inject_dma_descr()) {
pr_err("No free OUT DMA descriptors available!\n"); return -ENOSPC;
} elseif (len > 7 || len < 1) { return -EINVAL;
}
d = &dma->out[dma->out_cnt++];
memset(d, 0, sizeof(*d));
/* NOTE(review): fragment — this is the tail of the descriptor-array
 * mapping helper; the function head is not visible in this chunk, so the
 * code is left byte-for-byte as found.
 *
 * Map the in/out descriptor arrays for the device, zero the single stat
 * descriptor we care about, and map the stat array bidirectionally.
 */
ret = artpec6_crypto_dma_map_single(common, dma->in, sizeof(dma->in[0]) * dma->in_cnt,
DMA_TO_DEVICE, &dma->in_dma_addr); if (ret) return ret;
ret = artpec6_crypto_dma_map_single(common, dma->out, sizeof(dma->out[0]) * dma->out_cnt,
DMA_TO_DEVICE, &dma->out_dma_addr); if (ret) return ret;
/* We only read one stat descriptor */
dma->stat[dma->in_cnt - 1] = 0;
/*
 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor
 * to be written to memory before the device updates it.
 */
return artpec6_crypto_dma_map_single(common,
dma->stat, sizeof(dma->stat[0]) * dma->in_cnt,
DMA_BIDIRECTIONAL,
&dma->stat_dma_addr);
}
/** artpec6_crypto_setup_out_descr - Setup an out descriptor * * @dst: The virtual address of the data * @len: The length of the data * @eop: True if this is the last buffer in the packet * @use_short: If this is true and the data length is 7 bytes or less then * a short descriptor will be used * * @return 0 on success * Any errors from artpec6_crypto_setup_out_descr_short() or * setup_out_descr_phys()
*/ staticint
artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common, void *dst, unsignedint len, bool eop, bool use_short)
{ if (use_short && len < 7) { return artpec6_crypto_setup_out_descr_short(common, dst, len,
eop);
} else { int ret;
dma_addr_t dma_addr;
ret = artpec6_crypto_dma_map_single(common, dst, len,
DMA_TO_DEVICE,
&dma_addr); if (ret) return ret;
/** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a
 *                                       physical address
 *
 * @addr: The physical address of the data buffer
 * @len:  The length of the data buffer
 * @intr: True if an interrupt should be fired after HW processing of this
 *        descriptor
 *
 * @return 0 on success or -ENOSPC if there are no more descriptors available
 */
static int
artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common,
				   dma_addr_t addr, unsigned int len, bool intr)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (dma->in_cnt >= PDMA_DESCR_COUNT ||
	    fault_inject_dma_descr()) {
		pr_err("No free IN DMA descriptors available!\n");
		return -ENOSPC;
	}
	d = &dma->in[dma->in_cnt++];
	memset(d, 0, sizeof(*d));

	/* NOTE(review): tail reconstructed — the source chunk was truncated
	 * after the memset. The in channel never uses short descriptors. */
	d->ctrl.intr = intr;
	d->data.len = len;
	d->data.buf = addr;
	return 0;
}
/** artpec6_crypto_setup_in_descr - Setup an in channel descriptor * * @buffer: The virtual address to of the data buffer * @len: The length of the data buffer * @last: If this is the last data buffer in the request (i.e. an interrupt * is needed * * Short descriptors are not used for the in channel
*/ staticint
artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common, void *buffer, unsignedint len, bool last)
{
dma_addr_t dma_addr; int ret;
ret = artpec6_crypto_dma_map_single(common, buffer, len,
DMA_FROM_DEVICE, &dma_addr); if (ret) return ret;
/* NOTE(review): two unrelated fragments are fused here by the extraction —
 * the cache-line chunking logic from the in-channel sg setup, and the
 * end-of-list check from an sg walk. Code left byte-for-byte as found.
 *
 * When destination buffers are not aligned to the cache line
 * size we need bounce buffers. The DMA-API requires that the
 * entire line is owned by the DMA buffer and this holds also
 * for the case when coherent DMA is used.
 */
if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) {
/* Limit the chunk so it ends at the next cache-line boundary */
chunk = min_t(dma_addr_t, chunk,
ALIGN(addr, ARTPEC_CACHE_LINE_MAX) -
addr);
/* If bytes remain after walking the sg list, the caller asked for
 * more data than the list holds */
if (count)
pr_err("EOL unexpected %zu bytes left\n", count);
return count ? -EINVAL : 0;
}
/** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor
 *
 * If the out descriptor list is non-empty, then the eop flag on the
 * last used out descriptor will be set.
 *
 * @return 0 on success
 *	-EINVAL if the out descriptor is empty or has overflown
 */
static int
artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) {
		/* Bug fix: the diagnostic ternary was inverted — a non-zero
		 * count here means the list overflowed ("full"); a zero
		 * count means it is "empty". */
		pr_err("%s: OUT descriptor list is %s\n",
		       MODULE_NAME, dma->out_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->out[dma->out_cnt-1];
	d->ctrl.eop = 1;

	return 0;
}
/** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last
 *                                       in descriptor
 *
 * See artpec6_crypto_terminate_out_descrs() for return values
 */
static int
artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common)
{
	struct artpec6_crypto_dma_descriptors *dma = common->dma;
	struct pdma_descr *d;

	if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) {
		/* Bug fix: the diagnostic ternary was inverted — non-zero
		 * count means overflow ("full"), zero means "empty". */
		pr_err("%s: IN descriptor list is %s\n",
		       MODULE_NAME, dma->in_cnt ? "full" : "empty");
		return -EINVAL;
	}

	d = &dma->in[dma->in_cnt-1];
	d->ctrl.intr = 1;

	return 0;
}
/** create_hash_pad - Create a Secure Hash conformant pad
 *
 * @oper:     The hash operation (regk_crypto_* value); selects the block
 *            size and the width of the length field
 * @dst:      The destination buffer to write the pad. Must be at least 64
 *            bytes (128 bytes for the wide-block hashes)
 * @dgstlen:  The total length of the hash digest in bytes
 * @bitcount: The total length of the digest in bits
 *
 * @return The total number of padding bytes written to @dst
 */
static size_t
create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount)
{
	unsigned int mod, target, diff, pad_bytes, size_bytes;
	__be64 bits = __cpu_to_be64(bitcount);

	switch (oper) {
	case regk_crypto_sha1:
	case regk_crypto_sha256:
	case regk_crypto_hmac_sha1:
	case regk_crypto_hmac_sha256:
		/* 512-bit block with a 64-bit length field */
		target = 448 / 8;
		mod = 512 / 8;
		size_bytes = 8;
		break;
	default:
		/* 1024-bit block with a 128-bit length field */
		target = 896 / 8;
		mod = 1024 / 8;
		size_bytes = 16;
		break;
	}

	/* NOTE(review): the tail below was reconstructed — the source chunk
	 * was truncated after the switch. It implements the standard
	 * Merkle–Damgård padding: a 0x80 byte, zero fill up to the length
	 * field, then the big-endian bit count. Verify against upstream. */
	target -= 1;
	diff = dgstlen & (mod - 1);
	pad_bytes = diff > target ? target + mod - diff : target - diff;

	memset(dst + 1, 0, pad_bytes);
	dst[0] = 0x80;

	if (size_bytes == 16) {
		/* 128-bit length field: upper 64 bits are always zero */
		memset(dst + 1 + pad_bytes, 0, 8);
		memcpy(dst + 1 + pad_bytes + 8, &bits, 8);
	} else {
		memcpy(dst + 1 + pad_bytes, &bits, 8);
	}

	return pad_bytes + size_bytes + 1;
}
/*
 * The hardware uses only the last 32-bits as the counter while the
 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that
 * the whole IV is a counter. So fallback if the counter is going to
 * overflow.
 *
 * NOTE(review): fragment — the surrounding function (software fallback
 * path) is not visible in this chunk; code left as found.
 */
if (counter + nblks < counter) { int ret;
/* NOTE(review): fragments of the hash-request preparation path — the
 * function head and several intermediate statements are missing from this
 * chunk, so the code below is left byte-for-byte as found.
 */
/* Upload HMAC key, must be in the first packet */
if (req_ctx->hash_flags & HASH_FLAG_HMAC) { if (variant == ARTPEC6_CRYPTO) {
/* ARTPEC-6: select the "download key" operation in the metadata word */
req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER,
a6_regk_crypto_dlkey);
} else {
/* ARTPEC-7 uses a different metadata field layout */
req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER,
a7_regk_crypto_dlkey);
}
/* Copy and pad up the key */
memcpy(req_ctx->key_buffer, ctx->hmac_key,
ctx->hmac_key_length);
/* Zero-fill the remainder of the key block up to the hash block size */
memset(req_ctx->key_buffer + ctx->hmac_key_length, 0,
blocksize - ctx->hmac_key_length);
/* If this is the final round, set the final flag */
if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN;
} else {
/* ARTPEC-7 path: (re)select the context source in the hash metadata */
req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX;
req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx);
/* If this is the final round, set the final flag */
if (req_ctx->hash_flags & HASH_FLAG_FINALIZE)
req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN;
}
/* Setup up metadata descriptors */
error = artpec6_crypto_setup_out_descr(common,
(void *)&req_ctx->hash_md, sizeof(req_ctx->hash_md), false, false); if (error) return error;
/* The engine answers the metadata with 4 bytes we discard into the
 * scratch pad buffer */
error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); if (error) return error;
if (ext_ctx) {
/* Send the previously saved hash context to the engine */
error = artpec6_crypto_setup_out_descr(common,
req_ctx->digeststate,
contextsize, false, false);
/* NOTE(review): the 'if (error)' check after the call above appears to
 * have been lost in extraction — confirm against upstream. */
run_hw = ready_bytes > 0; if (req_ctx->partial_bytes && ready_bytes) {
/* We have a partial buffer and will send at least some bytes
 * to the HW. Empty this partial buffer before tackling
 * the SG lists.
 */
memcpy(req_ctx->partial_buffer_out,
req_ctx->partial_buffer,
req_ctx->partial_bytes);
/* Descriptor for the final result */
error = artpec6_crypto_setup_in_descr(common, areq->result,
digestsize, true); if (error) return error;
} else { /* This is not the final operation for this request */
if (!run_hw) return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START;
/* Save the result to the context */
error = artpec6_crypto_setup_in_descr(common,
req_ctx->digeststate,
contextsize, false); if (error) return error; /* fall through */
}
/** artpec6_crypto_prepare_crypto - Prepare an async block cipher crypto request
 *
 * @areq: The asynch request to process
 *
 * @return 0 if the dma job was successfully prepared
 *	   <0 on error
 *
 * This function sets up the PDMA descriptors for a block cipher request.
 *
 * The required padding is added for AES-CTR using a statically defined
 * buffer.
 *
 * The PDMA descriptor list will be as follows:
 *
 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop>
 * IN:  <CIPHER_MD><data_0>...[data_n]<intr>
 *
 * NOTE(review): 'staticint' below is a token fused by extraction, and the
 * cipher_md/key-descriptor setup between the declarations and the first
 * setup_out_descr() call is missing from this chunk — code left as found.
 */ staticint artpec6_crypto_prepare_crypto(struct skcipher_request *areq)
{ int ret; struct artpec6_crypto_walk walk; struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); struct artpec6_crypto_request_context *req_ctx = NULL;
size_t iv_len = crypto_skcipher_ivsize(cipher); struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); enum artpec6_crypto_variant variant = ac->variant; struct artpec6_crypto_req_common *common; bool cipher_decr = false;
size_t cipher_klen;
u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */
u32 oper;
req_ctx = skcipher_request_ctx(areq);
common = &req_ctx->common;
/* Emit the cipher metadata word and discard the 4-byte HW answer into
 * the scratch pad buffer */
ret = artpec6_crypto_setup_out_descr(common,
&req_ctx->cipher_md, sizeof(req_ctx->cipher_md), false, false); if (ret) return ret;
ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); if (ret) return ret;
if (iv_len) {
/* The IV follows the metadata on the OUT channel */
ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len, false, false); if (ret) return ret;
} /* Data out */
artpec6_crypto_walk_init(&walk, areq->src);
ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen); if (ret) return ret;
/* Data in */
artpec6_crypto_walk_init(&walk, areq->dst);
ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen); if (ret) return ret;
/* CTR-mode padding required by the HW. */
if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR ||
ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) {
size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) -
areq->cryptlen;
if (pad) {
/* Pad both channels with the scratch buffers so the engine sees
 * whole AES blocks */
ret = artpec6_crypto_setup_out_descr(common,
ac->pad_buffer,
pad, false, false); if (ret) return ret;
ret = artpec6_crypto_setup_in_descr(common,
ac->pad_buffer, pad, false); if (ret) return ret;
}
}
/* Mark the last descriptor of each channel (EOP / interrupt).
 * NOTE(review): the function tail (descriptor mapping + return) is
 * missing from this chunk. */
ret = artpec6_crypto_terminate_out_descrs(common); if (ret) return ret;
ret = artpec6_crypto_terminate_in_descrs(common); if (ret) return ret;
/* NOTE(review): fragment of the AEAD (AES-GCM) preparation path — the
 * function head (metadata computation, key upload) is not visible in this
 * chunk; code left byte-for-byte as found.
 *
 * Emit the cipher metadata word and discard the 4-byte HW answer. */
ret = artpec6_crypto_setup_out_descr(common,
(void *) &req_ctx->cipher_md, sizeof(req_ctx->cipher_md), false, false); if (ret) return ret;
ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); if (ret) return ret;
/* For the decryption, cryptlen includes the tag. */
input_length = areq->cryptlen; if (req_ctx->decrypt)
input_length -= crypto_aead_authsize(cipher);
/* Prepare the context buffer */
req_ctx->hw_ctx.aad_length_bits =
__cpu_to_be64(8*areq->assoclen);
memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); // The HW omits the initial increment of the counter field.
memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4);
/* Send the GCM context (J0 + AAD length) to the engine */
ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); if (ret) return ret;
{ struct artpec6_crypto_walk walk;
artpec6_crypto_walk_init(&walk, areq->src);
/* Associated data */
count = areq->assoclen;
ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); if (ret) return ret;
if (!IS_ALIGNED(areq->assoclen, 16)) {
size_t assoc_pad = 16 - (areq->assoclen % 16);
/* The HW mandates zero padding here */
ret = artpec6_crypto_setup_out_descr(common,
ac->zero_buffer,
assoc_pad, false, false); if (ret) return ret;
}
/* Data to crypto */
count = input_length;
ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); if (ret) return ret;
if (!IS_ALIGNED(input_length, 16)) {
size_t crypto_pad = 16 - (input_length % 16);
/* The HW mandates zero padding here */
ret = artpec6_crypto_setup_out_descr(common,
ac->zero_buffer,
crypto_pad, false, false); if (ret) return ret;
}
}
/* Data from crypto */
{ struct artpec6_crypto_walk walk;
size_t output_len = areq->cryptlen;
if (req_ctx->decrypt)
output_len -= crypto_aead_authsize(cipher);
artpec6_crypto_walk_init(&walk, areq->dst);
/* skip associated data in the output */
count = artpec6_crypto_walk_advance(&walk, areq->assoclen); if (count) return -EINVAL;
count = output_len;
ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count); if (ret) return ret;
/* Put padding between the cryptotext and the auth tag */
if (!IS_ALIGNED(output_len, 16)) {
size_t crypto_pad = 16 - (output_len % 16);
ret = artpec6_crypto_setup_in_descr(common,
ac->pad_buffer,
crypto_pad, false); if (ret) return ret;
}
/* The authentication tag shall follow immediately after
 * the output ciphertext. For decryption it is put in a context
 * buffer for later compare against the input tag.
 */
if (req_ctx->decrypt) {
ret = artpec6_crypto_setup_in_descr(common,
req_ctx->decryption_tag, AES_BLOCK_SIZE, false); if (ret) return ret;
} else {
/* For encryption the requested tag size may be smaller
 * than the hardware's generated tag.
 */
size_t authsize = crypto_aead_authsize(cipher);
ret = artpec6_crypto_setup_sg_descrs_in(common, &walk,
authsize); if (ret) return ret;
if (authsize < AES_BLOCK_SIZE) {
/* Discard the excess HW tag bytes into the scratch pad buffer */
count = AES_BLOCK_SIZE - authsize;
ret = artpec6_crypto_setup_in_descr(common,
ac->pad_buffer,
count, false); if (ret) return ret;
}
}
}
/* Mark the last descriptor of each channel (interrupt / EOP) */
ret = artpec6_crypto_terminate_in_descrs(common); if (ret) return ret;
ret = artpec6_crypto_terminate_out_descrs(common); if (ret) return ret;
/*
 * In some cases, the hardware can raise an in_eop_flush interrupt
 * before actually updating the status, so we have a timer which will
 * recheck the status on timeout. Since the cases are expected to be
 * very rare, we use a relatively large timeout value. There should be
 * no noticeable negative effect if we timeout spuriously.
 *
 * NOTE(review): fragment of the completion tasklet/worker — the function
 * head is not visible in this chunk; code left byte-for-byte as found.
 */
if (ac->pending_count)
mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100)); else
timer_delete(&ac->timer);
}
/* Perform the completion callbacks without holding the queue lock
 * to allow new request submissions from the callbacks.
 */
list_for_each_entry_safe(req, n, &complete_done, list) {
/* Fully retire each finished request: unmap its DMA buffers, copy any
 * bounce buffers back to the real destination, free the resources and
 * finally notify the submitter. */
artpec6_crypto_dma_unmap_all(req);
artpec6_crypto_copy_bounce_buffers(req);
artpec6_crypto_common_destroy(req);
req->complete(req->req);
}
list_for_each_entry_safe(req, n, &complete_in_progress,
complete_in_progress) {
/* Requests taken off the queue but not yet done are reported back as
 * still in progress */
crypto_request_complete(req->req, -EINPROGRESS);
}
}
/*
 * NOTE(review): extraneous text appended by the web page this file was
 * extracted from (a German disclaimer), wrapped in a comment so the file
 * stays compilable. Translation: "The information on this web page was
 * compiled carefully to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the provided information
 * is guaranteed. Note: the colored syntax highlighting and the
 * measurement are still experimental."
 */