/*
 * While polling on a busy HCU, wait maximum 200us between one check and the
 * other.
 */
#define OCS_HCU_WAIT_BUSY_RETRY_DELAY_US	200
/* Wait on a busy HCU for maximum 1 second. */
#define OCS_HCU_WAIT_BUSY_TIMEOUT_US		1000000
/**
 * struct ocs_hcu_dma_entry - An entry in an OCS DMA linked list.
 * @src_addr:  Source address of the data.
 * @src_len:   Length of data to be fetched.
 * @nxt_desc:  Next descriptor to fetch.
 * @ll_flags:  Flags (Freeze & terminate) for the DMA engine.
 */
struct ocs_hcu_dma_entry {
	u32 src_addr;
	u32 src_len;
	u32 nxt_desc;
	u32 ll_flags;
};
/**
 * struct ocs_hcu_dma_list - OCS-specific DMA linked list.
 * @head:	The head of the list (points to the array backing the list).
 * @tail:	The current tail of the list; NULL if the list is empty.
 * @dma_addr:	The DMA address of @head (i.e., the DMA address of the backing
 *		array).
 * @max_nents:	Maximum number of entries in the list (i.e., number of elements
 *		in the backing array).
 *
 * The OCS DMA list is an array-backed list of OCS DMA descriptors. The array
 * backing the list is allocated with dma_alloc_coherent() and pointed by
 * @head.
 */
struct ocs_hcu_dma_list {
	struct ocs_hcu_dma_entry *head;
	struct ocs_hcu_dma_entry *tail;
	dma_addr_t dma_addr;
	size_t max_nents;
};
staticinline u32 ocs_hcu_num_chains(enum ocs_hcu_algo algo)
{ switch (algo) { case OCS_HCU_ALGO_SHA224: case OCS_HCU_ALGO_SHA256: case OCS_HCU_ALGO_SM3: return OCS_HCU_NUM_CHAINS_SHA256_224_SM3; case OCS_HCU_ALGO_SHA384: case OCS_HCU_ALGO_SHA512: return OCS_HCU_NUM_CHAINS_SHA384_512; default: return 0;
};
}
staticinline u32 ocs_hcu_digest_size(enum ocs_hcu_algo algo)
{ switch (algo) { case OCS_HCU_ALGO_SHA224: return SHA224_DIGEST_SIZE; case OCS_HCU_ALGO_SHA256: case OCS_HCU_ALGO_SM3: /* SM3 shares the same block size. */ return SHA256_DIGEST_SIZE; case OCS_HCU_ALGO_SHA384: return SHA384_DIGEST_SIZE; case OCS_HCU_ALGO_SHA512: return SHA512_DIGEST_SIZE; default: return 0;
}
}
/** * ocs_hcu_wait_busy() - Wait for HCU OCS hardware to became usable. * @hcu_dev: OCS HCU device to wait for. * * Return: 0 if device free, -ETIMEOUT if device busy and internal timeout has * expired.
*/ staticint ocs_hcu_wait_busy(struct ocs_hcu_dev *hcu_dev)
{ long val;
/**
 * ocs_hcu_wait_and_disable_irq() - Wait for IRQ completion, then disable IRQs.
 * @hcu_dev: The OCS HCU device whose irq_done completion to wait on.
 *
 * Wait (interruptibly) until the IRQ handler signals completion, then disable
 * HCU interrupts unconditionally (also on error paths).
 *
 * Return: 0 on success, -EIO if the IRQ handler reported a hardware error,
 *	   or the negative error from wait_for_completion_interruptible().
 */
static int ocs_hcu_wait_and_disable_irq(struct ocs_hcu_dev *hcu_dev)
{
	int rc;

	rc = wait_for_completion_interruptible(&hcu_dev->irq_done);
	if (rc)
		goto exit;

	if (hcu_dev->irq_err) {
		/* Unset flag and return error. */
		hcu_dev->irq_err = false;
		rc = -EIO;
		goto exit;
	}

exit:
	ocs_hcu_irq_dis(hcu_dev);

	return rc;
}
/**
 * ocs_hcu_get_intermediate_data() - Get intermediate data.
 * @hcu_dev: The target HCU device.
 * @data:    Where to store the intermediate.
 * @algo:    The algorithm being used.
 *
 * This function is used to save the current hashing process state in order to
 * continue it in the future.
 *
 * Note: once all data has been processed, the intermediate data actually
 * contains the hashing result. So this function is also used to retrieve the
 * final result of a hashing process.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_get_intermediate_data(struct ocs_hcu_dev *hcu_dev,
					 struct ocs_hcu_idata *data,
					 enum ocs_hcu_algo algo)
{
	const int n = ocs_hcu_num_chains(algo);
	u32 *chain;
	int rc;
	int i;

	/* Data not requested. */
	if (!data)
		return -EINVAL;

	chain = (u32 *)data->digest;

	/* Ensure that the OCS is no longer busy before reading the chains. */
	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	/*
	 * This loop is safe because data->digest is an array of
	 * SHA512_DIGEST_SIZE bytes and the maximum value returned by
	 * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal
	 * to SHA512_DIGEST_SIZE / sizeof(u32).
	 */
	for (i = 0; i < n; i++)
		chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

	return 0;
}
/** * ocs_hcu_set_intermediate_data() - Set intermediate data. * @hcu_dev: The target HCU device. * @data: The intermediate data to be set. * @algo: The algorithm being used. * * This function is used to continue a previous hashing process.
*/ staticvoid ocs_hcu_set_intermediate_data(struct ocs_hcu_dev *hcu_dev, conststruct ocs_hcu_idata *data, enum ocs_hcu_algo algo)
{ constint n = ocs_hcu_num_chains(algo);
u32 *chain = (u32 *)data->digest; int i;
/* * This loops is safe because data->digest is an array of * SHA512_DIGEST_SIZE bytes and the maximum value returned by * ocs_hcu_num_chains() is OCS_HCU_NUM_CHAINS_SHA384_512 which is equal * to SHA512_DIGEST_SIZE / sizeof(u32).
*/ for (i = 0; i < n; i++)
writel(chain[i], hcu_dev->io_base + OCS_HCU_CHAIN);
/**
 * ocs_hcu_get_digest() - Read the computed digest from the HCU chain registers.
 * @hcu_dev:  The target HCU device.
 * @algo:     The algorithm that was used.
 * @dgst:     The buffer where to store the digest.
 * @dgst_len: The length of @dgst; must match the digest size of @algo.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_get_digest(struct ocs_hcu_dev *hcu_dev,
			      enum ocs_hcu_algo algo, u8 *dgst, size_t dgst_len)
{
	u32 *chain;
	int rc;
	int i;

	if (!dgst)
		return -EINVAL;

	/* Length of the output buffer must match the algo digest size. */
	if (dgst_len != ocs_hcu_digest_size(algo))
		return -EINVAL;

	/* Ensure that the OCS is no longer busy before reading the chains. */
	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	chain = (u32 *)dgst;
	for (i = 0; i < dgst_len / sizeof(u32); i++)
		chain[i] = readl(hcu_dev->io_base + OCS_HCU_CHAIN);

	return 0;
}
/**
 * ocs_hcu_hw_cfg() - Configure the HCU hardware.
 * @hcu_dev:  The HCU device to configure.
 * @algo:     The algorithm to be used by the HCU device.
 * @use_hmac: Whether or not HW HMAC should be used.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_hw_cfg(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
			  bool use_hmac)
{
	u32 cfg;
	int rc;

	/*
	 * NOTE(review): the body of this function was missing (file
	 * truncation); restored from the known upstream logic — confirm
	 * against the upstream driver.
	 */
	if (algo != OCS_HCU_ALGO_SHA256 && algo != OCS_HCU_ALGO_SHA224 &&
	    algo != OCS_HCU_ALGO_SHA384 && algo != OCS_HCU_ALGO_SHA512 &&
	    algo != OCS_HCU_ALGO_SM3)
		return -EINVAL;

	rc = ocs_hcu_wait_busy(hcu_dev);
	if (rc)
		return rc;

	/* Ensure interrupts are disabled. */
	ocs_hcu_irq_dis(hcu_dev);

	/* Configure endianness, hashing algorithm and HW cipher mode to use. */
	cfg = OCS_HCU_ENDIANNESS_VALUE << HCU_DATA_WRITE_ENDIANNESS_OFFSET;
	cfg |= algo << HCU_MODE_ALGO_SHIFT;
	if (use_hmac)
		cfg |= BIT(HCU_MODE_HMAC_SHIFT);

	writel(cfg, hcu_dev->io_base + OCS_HCU_MODE);

	return 0;
}
/**
 * ocs_hcu_write_key() - Write key to OCS HMAC KEY registers.
 * @hcu_dev: The OCS HCU device the key should be written to.
 * @key:     The key to be written.
 * @len:     The size of the key to write. It must not exceed
 *	     OCS_HCU_HW_KEY_LEN.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_write_key(struct ocs_hcu_dev *hcu_dev, const u8 *key,
			     size_t len)
{
	u32 key_u32[OCS_HCU_HW_KEY_LEN_U32];
	int i;

	/* Key cannot be longer than the HW key vector. */
	if (len > OCS_HCU_HW_KEY_LEN)
		return -EINVAL;

	/*
	 * Copy the key into a local buffer first; without this the code below
	 * would write uninitialized stack bytes to the key registers.
	 */
	memcpy(key_u32, key, len);

	/*
	 * Hardware requires all the bytes of the HW Key vector to be
	 * written. So pad with zero until we reach OCS_HCU_HW_KEY_LEN.
	 */
	memzero_explicit((u8 *)key_u32 + len, OCS_HCU_HW_KEY_LEN - len);

	/*
	 * OCS hardware expects the MSB of the key to be written at the highest
	 * address of the HCU Key vector; in other word, the key must be
	 * written in reverse order.
	 *
	 * Therefore, we first enable byte swapping for the HCU key vector;
	 * so that bytes of 32-bit word written to OCS_HCU_KEY_[0..15] will be
	 * swapped:
	 * 3 <---> 0, 2 <---> 1.
	 */
	writel(HCU_BYTE_ORDER_SWAP,
	       hcu_dev->io_base + OCS_HCU_KEY_BYTE_ORDER_CFG);
	/*
	 * And then we write the 32-bit words composing the key starting from
	 * the end of the key.
	 */
	for (i = 0; i < OCS_HCU_HW_KEY_LEN_U32; i++)
		writel(key_u32[OCS_HCU_HW_KEY_LEN_U32 - 1 - i],
		       hcu_dev->io_base + OCS_HCU_KEY_0 + (sizeof(u32) * i));

	/* Zeroize the local copy of the key material. */
	memzero_explicit(key_u32, OCS_HCU_HW_KEY_LEN);

	return 0;
}
/**
 * ocs_hcu_ll_dma_start() - Start OCS HCU hashing via DMA
 * @hcu_dev:  The OCS HCU device to use.
 * @dma_list: The OCS DMA list mapping the data to hash.
 * @finalize: Whether or not this is the last hashing operation and therefore
 *	      the final hash should be compute even if data is not
 *	      block-aligned.
 *
 * Return: 0 on success, negative error code otherwise.
 */
static int ocs_hcu_ll_dma_start(struct ocs_hcu_dev *hcu_dev,
				const struct ocs_hcu_dma_list *dma_list,
				bool finalize)
{
	u32 cfg = HCU_DMA_SNOOP_MASK | HCU_DMA_SRC_LL_EN | HCU_DMA_EN;
	int rc;

	if (!dma_list)
		return -EINVAL;

	/*
	 * For final requests we use HCU_DONE IRQ to be notified when all input
	 * data has been processed by the HCU; however, we cannot do so for
	 * non-final requests, because we don't get a HCU_DONE IRQ when we
	 * don't terminate the operation.
	 *
	 * Therefore, for non-final requests, we use the DMA IRQ, which
	 * triggers when DMA has finishing feeding all the input data to the
	 * HCU, but the HCU may still be processing it. This is fine, since we
	 * will wait for the HCU processing to be completed when we try to read
	 * intermediate results, in ocs_hcu_get_intermediate_data().
	 */
	if (finalize)
		ocs_hcu_done_irq_en(hcu_dev);
	else
		ocs_hcu_dma_irq_en(hcu_dev);

	/*
	 * NOTE(review): the remainder of this function was missing (file
	 * truncation); restored from the known upstream logic — confirm
	 * register programming order against the upstream driver.
	 */
	reinit_completion(&hcu_dev->irq_done);

	/* Point the DMA engine at the head of the descriptor linked list. */
	writel(dma_list->dma_addr, hcu_dev->io_base + OCS_HCU_DMA_NEXT_SRC_DESCR);
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
	writel(0, hcu_dev->io_base + OCS_HCU_DMA_DST_SIZE);

	/* Start the HCU, then enable DMA in linked-list mode. */
	writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
	writel(cfg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

	/* For final requests, terminate the operation to get the final hash. */
	if (finalize)
		writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	return 0;
}
/**
 * ocs_hcu_dma_list_add_tail() - Add a new DMA entry at the end of the OCS DMA
 *				 list.
 * @hcu_dev:  The OCS HCU device the list belongs to.
 * @dma_list: The list to append to.
 * @addr:     DMA address of the data the new entry maps.
 * @len:      Length of the data; a zero length is a no-op.
 *
 * Return: 0 on success (or @len == 0), -EINVAL on bad arguments, -ENOMEM if
 *	   the backing array is full.
 */
int ocs_hcu_dma_list_add_tail(struct ocs_hcu_dev *hcu_dev,
			      struct ocs_hcu_dma_list *dma_list,
			      dma_addr_t addr, u32 len)
{
	struct device *dev = hcu_dev->dev;
	struct ocs_hcu_dma_entry *old_tail;
	struct ocs_hcu_dma_entry *new_tail;

	if (!len)
		return 0;

	if (!dma_list)
		return -EINVAL;

	if (addr & ~OCS_HCU_DMA_BIT_MASK) {
		dev_err(dev,
			"Unexpected error: Invalid DMA address for OCS HCU\n");
		return -EINVAL;
	}

	/*
	 * Compute the new tail position; these assignments were missing and
	 * left both pointers uninitialized before use.
	 */
	old_tail = dma_list->tail;
	new_tail = old_tail ? old_tail + 1 : dma_list->head;

	/* Check if list is full. */
	if (new_tail - dma_list->head >= dma_list->max_nents)
		return -ENOMEM;

	/*
	 * If there was an old tail (i.e., this is not the first element we are
	 * adding), un-terminate the old tail and make it point to the new one.
	 */
	if (old_tail) {
		old_tail->ll_flags &= ~OCS_LL_DMA_FLAG_TERMINATE;
		/*
		 * The old tail 'nxt_desc' must point to the DMA address of the
		 * new tail.
		 */
		old_tail->nxt_desc = dma_list->dma_addr +
				     sizeof(*dma_list->tail) * (new_tail -
								dma_list->head);
	}

	/* Initialize the new tail: it maps @addr/@len and terminates the list. */
	new_tail->src_addr = (u32)addr;
	new_tail->src_len = len;
	new_tail->ll_flags = OCS_LL_DMA_FLAG_TERMINATE;
	new_tail->nxt_desc = 0;

	/* Update list tail with new tail. */
	dma_list->tail = new_tail;

	return 0;
}
/**
 * ocs_hcu_hash_init() - Initialize hash operation context.
 * @ctx:  The context to initialize.
 * @algo: The hashing algorithm to use.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int ocs_hcu_hash_init(struct ocs_hcu_hash_ctx *ctx, enum ocs_hcu_algo algo)
{
	if (!ctx)
		return -EINVAL;

	/* Record the algorithm and reset the processed-message length. */
	ctx->algo = algo;
	ctx->idata.msg_len_lo = 0;
	ctx->idata.msg_len_hi = 0;
	/* No need to set idata.digest to 0. */

	return 0;
}
/** * ocs_hcu_hash_update() - Perform a hashing iteration. * @hcu_dev: The OCS HCU device to use. * @ctx: The OCS HCU hashing context. * @dma_list: The OCS DMA list mapping the input data to process. * * Return: 0 on success; negative error code otherwise.
*/ int ocs_hcu_hash_update(struct ocs_hcu_dev *hcu_dev, struct ocs_hcu_hash_ctx *ctx, conststruct ocs_hcu_dma_list *dma_list)
{ int rc;
if (!hcu_dev || !ctx) return -EINVAL;
/* Configure the hardware for the current request. */
rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false); if (rc) return rc;
/* If we already processed some data, idata needs to be set. */ if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);
/**
 * ocs_hcu_hash_finup() - Update and finalize hash computation.
 * @hcu_dev:  The OCS HCU device to use.
 * @ctx:      The OCS HCU hashing context.
 * @dma_list: The OCS DMA list mapping the input data to process.
 * @dgst:     The buffer where to save the computed digest.
 * @dgst_len: The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_finup(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx,
		       const struct ocs_hcu_dma_list *dma_list,
		       u8 *dgst, size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/*
	 * Start the final (terminating) DMA hashing operation; this call was
	 * missing, so the digest would have been read without processing the
	 * remaining input.
	 */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
	if (rc)
		return rc;

	/* Get digest and return. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}
/**
 * ocs_hcu_hash_final() - Finalize hash computation.
 * @hcu_dev:  The OCS HCU device to use.
 * @ctx:      The OCS HCU hashing context.
 * @dgst:     The buffer where to save the computed digest.
 * @dgst_len: The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hash_final(struct ocs_hcu_dev *hcu_dev,
		       const struct ocs_hcu_hash_ctx *ctx, u8 *dgst,
		       size_t dgst_len)
{
	int rc;

	if (!hcu_dev || !ctx)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, ctx->algo, false);
	if (rc)
		return rc;

	/* If we already processed some data, idata needs to be set. */
	if (ctx->idata.msg_len_lo || ctx->idata.msg_len_hi)
		ocs_hcu_set_intermediate_data(hcu_dev, &ctx->idata, ctx->algo);

	/*
	 * Enable HCU interrupts, so that HCU_DONE will be triggered once the
	 * final hash is computed.
	 */
	ocs_hcu_done_irq_en(hcu_dev);
	reinit_completion(&hcu_dev->irq_done);

	/* No further input: terminate the operation to produce the hash. */
	writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	/* Get digest and return. */
	return ocs_hcu_get_digest(hcu_dev, ctx->algo, dgst, dgst_len);
}
/**
 * ocs_hcu_digest() - Compute hash digest.
 * @hcu_dev:  The OCS HCU device to use.
 * @algo:     The hash algorithm to use.
 * @data:     The input data to process.
 * @data_len: The length of @data.
 * @dgst:     The buffer where to save the computed digest.
 * @dgst_len: The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_digest(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
		   void *data, size_t data_len, u8 *dgst, size_t dgst_len)
{
	struct device *dev = hcu_dev->dev;
	dma_addr_t dma_handle;
	u32 reg;
	int rc;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, algo, false);
	if (rc)
		return rc;

	/*
	 * NOTE(review): the body of this function past hw-cfg was missing
	 * (file truncation); restored from the known upstream logic — confirm
	 * against the upstream driver.
	 */
	dma_handle = dma_map_single(dev, data, data_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_handle))
		return -EIO;

	reg = HCU_DMA_SNOOP_MASK | HCU_DMA_EN;

	ocs_hcu_done_irq_en(hcu_dev);

	reinit_completion(&hcu_dev->irq_done);

	/* Program source buffer, start the HCU and enable DMA. */
	writel(dma_handle, hcu_dev->io_base + OCS_HCU_DMA_SRC_ADDR);
	writel(data_len, hcu_dev->io_base + OCS_HCU_DMA_SRC_SIZE);
	writel(OCS_HCU_START, hcu_dev->io_base + OCS_HCU_OPERATION);
	writel(reg, hcu_dev->io_base + OCS_HCU_DMA_DMA_MODE);

	/* Single-shot request: terminate to produce the final hash. */
	writel(OCS_HCU_TERMINATE, hcu_dev->io_base + OCS_HCU_OPERATION);

	rc = ocs_hcu_wait_and_disable_irq(hcu_dev);
	if (rc)
		return rc;

	dma_unmap_single(dev, dma_handle, data_len, DMA_TO_DEVICE);

	return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
/**
 * ocs_hcu_hmac() - Compute HMAC.
 * @hcu_dev:  The OCS HCU device to use.
 * @algo:     The hash algorithm to use with HMAC.
 * @key:      The key to use.
 * @key_len:  The length of @key.
 * @dma_list: The OCS DMA list mapping the input data to process.
 * @dgst:     The buffer where to save the computed HMAC.
 * @dgst_len: The length of @dgst.
 *
 * Return: 0 on success; negative error code otherwise.
 */
int ocs_hcu_hmac(struct ocs_hcu_dev *hcu_dev, enum ocs_hcu_algo algo,
		 const u8 *key, size_t key_len,
		 const struct ocs_hcu_dma_list *dma_list,
		 u8 *dgst, size_t dgst_len)
{
	int rc;

	/* Ensure 'key' is not NULL. */
	if (!key || key_len == 0)
		return -EINVAL;

	/* Configure the hardware for the current request. */
	rc = ocs_hcu_hw_cfg(hcu_dev, algo, true);
	if (rc)
		return rc;

	rc = ocs_hcu_write_key(hcu_dev, key, key_len);
	if (rc)
		return rc;

	/*
	 * NOTE(review): the tail of this function was missing (file
	 * truncation); restored: start the final DMA hashing operation and
	 * read back the HMAC — confirm against upstream.
	 */
	rc = ocs_hcu_ll_dma_start(hcu_dev, dma_list, true);
	if (rc)
		return rc;

	return ocs_hcu_get_digest(hcu_dev, algo, dgst, dgst_len);
}
/*
 * NOTE(review): the following German website disclaimer was accidentally
 * pasted into this source file (raw prose is not valid C); wrapped in a
 * comment and translated — it should most likely be removed entirely:
 * "The information on this website has been compiled carefully and to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */