/*
 * CESA_SA_FPGA_INT_STATUS looks like an FPGA leftover and is documented only
 * in Errata 4.12. It appears to have been part of an IRQ controller in the
 * FPGA, and someone forgot to remove it while switching to the core and
 * moving to CESA_SA_INT_STATUS.
 */
#define CESA_SA_FPGA_INT_STATUS		0xdd68
#define CESA_SA_INT_STATUS		0xde20
#define CESA_SA_INT_AUTH_DONE		BIT(0)
#define CESA_SA_INT_DES_E_DONE		BIT(1)
#define CESA_SA_INT_AES_E_DONE		BIT(2)
#define CESA_SA_INT_AES_D_DONE		BIT(3)
#define CESA_SA_INT_ENC_DONE		BIT(4)
#define CESA_SA_INT_ACCEL0_DONE		BIT(5)
#define CESA_SA_INT_ACCEL1_DONE		BIT(6)
#define CESA_SA_INT_ACC0_IDMA_DONE	BIT(7)
#define CESA_SA_INT_ACC1_IDMA_DONE	BIT(8)
#define CESA_SA_INT_IDMA_DONE		BIT(9)
#define CESA_SA_INT_IDMA_OWN_ERR	BIT(10)
/**
 * struct mv_cesa_tdma_desc - TDMA descriptor
 * @byte_cnt:	number of bytes to transfer
 * @src:	DMA address of the source
 * @dst:	DMA address of the destination
 * @next_dma:	DMA address of the next TDMA descriptor
 * @cur_dma:	DMA address of this TDMA descriptor
 * @next:	pointer to the next TDMA descriptor
 * @op:		CESA operation attached to this TDMA descriptor
 * @data:	raw data attached to this TDMA descriptor
 * @flags:	flags describing the TDMA transfer. See the
 *		"TDMA descriptor flags" section above
 *
 * TDMA descriptor used to create a transfer chain describing a crypto
 * operation.
 */
struct mv_cesa_tdma_desc {
	__le32 byte_cnt;
	union {
		__le32 src;
		u32 src_dma;
	};
	union {
		__le32 dst;
		u32 dst_dma;
	};
	__le32 next_dma;

	/* Software state */
	dma_addr_t cur_dma;
	struct mv_cesa_tdma_desc *next;
	union {
		struct mv_cesa_op_ctx *op;
		void *data;
	};
	u32 flags;
};
/**
 * struct mv_cesa_sg_dma_iter - scatter-gather iterator
 * @dir:	transfer direction
 * @sg:		scatter list
 * @offset:	current position in the scatter list
 * @op_offset:	current position in the crypto operation
 *
 * Iterator used to iterate over a scatterlist while creating a TDMA chain for
 * a crypto operation.
 */
struct mv_cesa_sg_dma_iter {
	enum dma_data_direction dir;
	struct scatterlist *sg;
	unsigned int offset;	/* was mangled as "unsignedint" */
	unsigned int op_offset;
};
/**
 * struct mv_cesa_dma_iter - crypto operation iterator
 * @len:	the crypto operation length
 * @offset:	current position in the crypto operation
 * @op_len:	sub-operation length (the crypto engine can only act on 2kb
 *		chunks)
 *
 * Iterator used to create a TDMA chain for a given crypto operation.
 */
struct mv_cesa_dma_iter {
	unsigned int len;	/* was mangled as "unsignedint" */
	unsigned int offset;
	unsigned int op_len;
};
/**
 * struct mv_cesa_tdma_chain - TDMA chain
 * @first:	first entry in the TDMA chain
 * @last:	last entry in the TDMA chain
 *
 * Stores a TDMA chain for a specific crypto operation.
 */
struct mv_cesa_tdma_chain {
	struct mv_cesa_tdma_desc *first;
	struct mv_cesa_tdma_desc *last;
};
/* Forward declaration; the full definition appears below. */
struct mv_cesa_engine;
/**
 * struct mv_cesa_caps - CESA device capabilities
 * @nengines:		number of engines
 * @has_tdma:		whether this device has a TDMA block
 * @cipher_algs:	supported cipher algorithms
 * @ncipher_algs:	number of supported cipher algorithms
 * @ahash_algs:		supported hash algorithms
 * @nahash_algs:	number of supported hash algorithms
 *
 * Structure used to describe CESA device capabilities.
 */
struct mv_cesa_caps {
	int nengines;
	bool has_tdma;
	struct skcipher_alg **cipher_algs;
	int ncipher_algs;
	struct ahash_alg **ahash_algs;
	int nahash_algs;
};
/**
 * struct mv_cesa_dev_dma - DMA pools
 * @tdma_desc_pool:	TDMA desc pool
 * @op_pool:		crypto operation pool
 * @cache_pool:		data cache pool (used by hash implementation when the
 *			hash request is smaller than the hash block size)
 * @padding_pool:	padding pool (used by hash implementation when hardware
 *			padding cannot be used)
 *
 * Structure containing the different DMA pools used by this driver.
 */
struct mv_cesa_dev_dma {
	struct dma_pool *tdma_desc_pool;
	struct dma_pool *op_pool;
	struct dma_pool *cache_pool;
	struct dma_pool *padding_pool;
};
/**
 * struct mv_cesa_engine - CESA engine
 * @id:			engine id
 * @regs:		engine registers
 * @sram:		SRAM memory region
 * @sram_pool:		SRAM memory region from pool
 * @sram_dma:		DMA address of the SRAM memory region
 * @lock:		engine lock
 * @req:		current crypto request
 * @clk:		engine clk
 * @zclk:		engine zclk
 * @max_req_len:	maximum chunk length (useful to create the TDMA chain)
 * @int_mask:		interrupt mask cache
 * @pool:		memory pool pointing to the memory region reserved in
 *			SRAM
 * @queue:		fifo of the pending crypto requests
 * @load:		engine load counter, useful for load balancing
 * @chain_hw:		list of the current tdma descriptors being processed
 *			by the hardware.
 * @chain_sw:		list of the current tdma descriptors that will be
 *			submitted to the hardware.
 * @complete_queue:	fifo of the processed requests by the engine
 * @irq:		engine IRQ number
 *
 * Structure storing CESA engine information.
 */
struct mv_cesa_engine {
	int id;
	void __iomem *regs;
	union {
		void __iomem *sram;
		void *sram_pool;
	};
	dma_addr_t sram_dma;
	spinlock_t lock;
	struct crypto_async_request *req;
	struct clk *clk;
	struct clk *zclk;
	size_t max_req_len;
	u32 int_mask;
	struct gen_pool *pool;
	struct crypto_queue queue;
	atomic_t load;
	struct mv_cesa_tdma_chain chain_hw;
	struct mv_cesa_tdma_chain chain_sw;
	struct list_head complete_queue;
	int irq;
};
/**
 * struct mv_cesa_req_ops - CESA request operations
 * @process:	process a request chunk result (should return 0 if the
 *		operation is done, -EINPROGRESS if it needs more steps or an
 *		error code)
 * @step:	launch the crypto operation on the next chunk
 * @cleanup:	cleanup the crypto request (release associated data)
 * @complete:	complete the request, i.e. copy result or context from sram
 *		when needed.
 */
struct mv_cesa_req_ops {
	int (*process)(struct crypto_async_request *req, u32 status);
	void (*step)(struct crypto_async_request *req);
	void (*cleanup)(struct crypto_async_request *req);
	void (*complete)(struct crypto_async_request *req);
};
/**
 * struct mv_cesa_ctx - CESA operation context
 * @ops:	crypto operations
 *
 * Base context structure inherited by operation specific ones.
 */
struct mv_cesa_ctx {
	const struct mv_cesa_req_ops *ops;	/* was mangled as "conststruct" */
};
/**
 * struct mv_cesa_req - CESA request
 * @engine:	engine associated with this request
 * @chain:	list of tdma descriptors associated with this request
 */
struct mv_cesa_req {
	struct mv_cesa_engine *engine;
	struct mv_cesa_tdma_chain chain;
};
/**
 * struct mv_cesa_sg_std_iter - CESA scatter-gather iterator for standard
 * requests
 * @iter:	sg mapping iterator
 * @offset:	current offset in the SG entry mapped in memory
 */
struct mv_cesa_sg_std_iter {
	struct sg_mapping_iter iter;
	unsigned int offset;	/* was mangled as "unsignedint" */
};
/**
 * struct mv_cesa_skcipher_std_req - cipher standard request
 * @op:		operation context
 * @offset:	current operation offset
 * @size:	size of the crypto operation
 * @skip_ctx:	whether the operation context can be skipped — TODO confirm
 *		exact semantics against the users of this field
 */
struct mv_cesa_skcipher_std_req {
	struct mv_cesa_op_ctx op;
	unsigned int offset;	/* was mangled as "unsignedint" */
	unsigned int size;
	bool skip_ctx;
};
/**
 * struct mv_cesa_skcipher_req - cipher request
 * @base:	base request information
 * @std:	standard (non-DMA) request information
 * @src_nents:	number of entries in the src sg list
 * @dst_nents:	number of entries in the dest sg list
 */
struct mv_cesa_skcipher_req {
	struct mv_cesa_req base;
	struct mv_cesa_skcipher_std_req std;
	int src_nents;
	int dst_nents;
};
/**
 * struct mv_cesa_ahash_std_req - standard hash request
 * @offset:	current operation offset
 */
struct mv_cesa_ahash_std_req {
	unsigned int offset;	/* was mangled as "unsignedint" */
};
/**
 * struct mv_cesa_ahash_req - hash request
 * @base:	base request information
 * @req:	type specific request information
 * @op_tmpl:	operation template used for this request
 * @cache:	cache buffer
 * @cache_ptr:	write pointer in the cache buffer
 * @len:	hash total length
 * @src_nents:	number of entries in the scatterlist
 * @last_req:	define whether the current operation is the last one
 *		or not
 * @algo_le:	whether the hash algorithm is little-endian — TODO confirm
 *		exact semantics against the users of this field
 * @state:	hash state
 */
struct mv_cesa_ahash_req {
	struct mv_cesa_req base;
	union {
		struct mv_cesa_ahash_dma_req dma;
		struct mv_cesa_ahash_std_req std;
	} req;
	struct mv_cesa_op_ctx op_tmpl;
	u8 cache[CESA_MAX_HASH_BLOCK_SIZE];
	unsigned int cache_ptr;	/* was mangled as "unsignedint" */
	u64 len;
	int src_nents;
	bool last_req;
	bool algo_le;
	u32 state[8];
};
/*
 * Helper function that indicates whether a crypto request needs to be
 * cleaned up or not after being enqueued using mv_cesa_queue_req().
 */
static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
					    int ret)
{
	/*
	 * The queue still had some space, the request was queued
	 * normally, so there's no need to clean it up.
	 */
	if (ret == -EINPROGRESS)
		return false;

	/*
	 * The queue had no space left, but since the request is
	 * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
	 * the backlog and will be processed later. There's no need to
	 * clean it up.
	 */
	if (ret == -EBUSY)
		return false;

	/* Request wasn't queued, we need to clean it up */
	return true;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.