/**
 * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
 * @data: data pointer
 *
 * This function is a callback for DMA descriptor completion.
 */
void qcom_qpic_bam_dma_done(void *data)
{
        struct bam_transaction *bam_txn = data;

        /* wake up qcom_submit_descs(), which waits on txn_done */
        complete(&bam_txn->txn_done);
}
/**
 * qcom_nandc_dev_to_mem() - DMA sync the register read buffer for CPU or device
 * @nandc: qpic nand controller
 * @is_cpu: true to sync for CPU access, false to sync for device access
 *
 * This function performs a DMA sync of the register read buffer for either
 * the CPU or the device.
 */
inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
{
        if (!nandc->props->supports_bam)
                return;
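        /*
         * Sketch of what happens next (the dma_sync_single_for_cpu()/
         * dma_sync_single_for_device() calls are assumed here, based on the
         * @is_cpu flag and the DMA_FROM_DEVICE mapping of the register read
         * buffer set up in qcom_nandc_alloc()): the buffer at
         * nandc->reg_read_dma is synced for the CPU before results are
         * parsed, or back to the device before a new transfer.
         */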
/**
 * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
 * @nandc: qpic nand controller
 * @chan: dma channel
 * @flags: flags to control DMA descriptor preparation
 *
 * This function maps the scatter gather list for DMA transfer and forms the
 * DMA descriptor for BAM. This descriptor will be added to the NAND DMA
 * descriptor queue which will be submitted to the DMA engine.
 */
int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
                                struct dma_chan *chan, unsigned long flags)
{
        struct desc_info *desc;
        struct scatterlist *sgl;
        unsigned int sgl_cnt;
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        enum dma_transfer_direction dir_eng;
        struct dma_async_tx_descriptor *dma_desc;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;
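        /*
         * Flow from here, in outline (a sketch; dma_map_sg() and
         * dmaengine_prep_slave_sg() are assumptions about the standard
         * dmaengine flow): the pending command or data sgl from bam_txn is
         * DMA-mapped, a slave-sg descriptor is prepared on @chan with @flags,
         * and the resulting desc is queued on nandc->desc_list so that
         * qcom_submit_descs() can submit it.
         */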
/**
 * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares the command descriptor for BAM DMA, which is used
 * for NAND register reads and writes.
 */
int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
                               int reg_off, const void *vaddr, int size,
                               unsigned int flags)
{
        int bam_ce_size;
        int i, ret;
        struct bam_cmd_element *bam_ce_buffer;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        u32 offset;

        if (bam_txn->bam_ce_pos + size > bam_txn->bam_ce_nitems) {
                dev_err(nandc->dev, "BAM %s array is full\n", "CE");
                return -EINVAL;
        }

        /* point at the next free command element for this transfer */
        bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];
        /* fill the command desc */
        for (i = 0; i < size; i++) {
                offset = nandc->props->bam_offset + reg_off + 4 * i;
                if (read)
                        bam_prep_ce(&bam_ce_buffer[i],
                                    offset, BAM_READ_COMMAND,
                                    reg_buf_dma_addr(nandc,
                                                     (__le32 *)vaddr + i));
                else
                        bam_prep_ce_le32(&bam_ce_buffer[i],
                                         offset, BAM_WRITE_COMMAND,
                                         *((__le32 *)vaddr + i));
        }

        bam_txn->bam_ce_pos += size;
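        /*
         * Command elements accumulate in the bam_txn command-element array
         * until a descriptor boundary: when the caller passes
         * NAND_BAM_NEXT_SGL, the elements gathered so far are handed off to
         * the command sgl below and a fresh sgl entry is started.
         */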
        /* use the separate sgl after this command */
        if (flags & NAND_BAM_NEXT_SGL) {
                if (bam_txn->cmd_sgl_pos >= bam_txn->cmd_sgl_nitems) {
                        dev_err(nandc->dev, "BAM %s array is full\n", "CMD sgl");
                        return -EINVAL;
                }
/**
 * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares the data descriptor for BAM DMA, which is used for
 * NAND data reads and writes.
 */
int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
                                const void *vaddr, int size, unsigned int flags)
{
        int ret;
        struct bam_transaction *bam_txn = nandc->bam_txn;

        if (read) {
                if (bam_txn->rx_sgl_pos >= bam_txn->data_sgl_nitems) {
                        dev_err(nandc->dev, "BAM %s array is full\n", "RX sgl");
                        return -EINVAL;
                }
        } else {
                /*
                 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
                 * is not set, form the DMA descriptor
                 */
                if (!(flags & NAND_BAM_NO_EOT)) {
                        ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                          DMA_PREP_INTERRUPT);
                        if (ret)
                                return ret;
                }
        }
/**
 * qcom_prep_adm_dma_desc() - Prepare a descriptor for the ADM DMA engine
 * @nandc: qpic nand controller
 * @read: read or write type
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: ADM DMA transaction size in bytes
 * @flow_control: whether to enable flow control for the transfer
 *
 * This function will prepare a descriptor for the ADM DMA engine.
 */
int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
                           int reg_off, const void *vaddr, int size,
                           bool flow_control)
{
        struct qcom_adm_peripheral_config periph_conf = {};
        struct dma_async_tx_descriptor *dma_desc;
        struct dma_slave_config slave_conf = {0};
        enum dma_transfer_direction dir_eng;
        struct desc_info *desc;
        struct scatterlist *sgl;
        int ret;

        desc = kzalloc(sizeof(*desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;
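        /*
         * Design note: controllers without BAM (nandc->props->supports_bam
         * is false) sit behind the older ADM DMA engine and use the single
         * "rxtx" channel requested in qcom_nandc_alloc(), whereas BAM-based
         * controllers use separate tx, rx and cmd channels.
         */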
/**
 * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
 * @nandc: qpic nand controller
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to read
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a descriptor to read a given number of contiguous
 * registers into the reg_read_buf pointer.
 */
int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
                      int num_regs, unsigned int flags)
{
        bool flow_control = false;
        void *vaddr;
/**
 * qcom_write_reg_dma() - write a given number of registers
 * @nandc: qpic nand controller
 * @vaddr: contiguous memory from which the register values will be written
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to write
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a descriptor to write a given number of contiguous
 * registers.
 */
int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
                       int first, int num_regs, unsigned int flags)
{
        bool flow_control = false;
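        /*
         * Writes to NAND_EXEC_CMD kick off the NAND operation; NAND_BAM_NWD
         * (read here as a BAM "notify when done" request, an assumption based
         * on the flag name) asks the BAM engine to confirm the write reached
         * the controller before moving on.
         */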
        if (first == NAND_EXEC_CMD)
                flags |= NAND_BAM_NWD;

        if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

        if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
                first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

        if (nandc->props->supports_bam)
                return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
                                                  num_regs, flags);
/**
 * qcom_read_data_dma() - transfer data from the controller's internal buffer
 * @nandc: qpic nand controller
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a DMA descriptor to transfer data from the
 * controller's internal buffer to the buffer 'vaddr'.
 */
int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                       const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->supports_bam)
                return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);
/**
 * qcom_write_data_dma() - transfer data to the controller's internal buffer
 * @nandc: qpic nand controller
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to read from
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a DMA descriptor to transfer data from 'vaddr' to
 * the controller's internal buffer.
 */
int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
                        const u8 *vaddr, int size, unsigned int flags)
{
        if (nandc->props->supports_bam)
                return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);
/**
 * qcom_submit_descs() - submit DMA descriptors
 * @nandc: qpic nand controller
 *
 * This function will submit all the prepared DMA descriptors, both command
 * and data descriptors.
 */
int qcom_submit_descs(struct qcom_nand_controller *nandc)
{
        struct desc_info *desc, *n;
        dma_cookie_t cookie = 0;
        struct bam_transaction *bam_txn = nandc->bam_txn;
        int ret = 0;

        if (nandc->props->supports_bam) {
                if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
                        ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
                        if (ret)
                                goto err_unmap_free_desc;
                }

                if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
                        ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
                                                          DMA_PREP_INTERRUPT);
                        if (ret)
                                goto err_unmap_free_desc;
                }

                if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
                        ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
                                                          DMA_PREP_CMD);
                        if (ret)
                                goto err_unmap_free_desc;
                }
        }
        list_for_each_entry(desc, &nandc->desc_list, node)
                cookie = dmaengine_submit(desc->dma_desc);

        if (nandc->props->supports_bam) {
                /* completion is signalled by qcom_qpic_bam_dma_done() */
                bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
                bam_txn->last_cmd_desc->callback_param = bam_txn;

                dma_async_issue_pending(nandc->tx_chan);
                dma_async_issue_pending(nandc->rx_chan);
                dma_async_issue_pending(nandc->cmd_chan);

                if (!wait_for_completion_timeout(&bam_txn->txn_done,
                                                 QPIC_NAND_COMPLETION_TIMEOUT))
                        ret = -ETIMEDOUT;
        } else {
                if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
                        ret = -ETIMEDOUT;
        }
err_unmap_free_desc:
        /*
         * Unmap the dma sg_list and free the desc allocated by both
         * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
         */
        list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
                list_del(&desc->node);
/**
 * qcom_clear_read_regs() - reset the read register buffer
 * @nandc: qpic nand controller
 *
 * This function resets the register read buffer for the next NAND operation.
 */
void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
{
        nandc->reg_read_pos = 0;
        qcom_nandc_dev_to_mem(nandc, false);
}
EXPORT_SYMBOL(qcom_clear_read_regs);
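/*
 * Typical call sequence for one controller operation (an illustrative sketch
 * only; which registers are written and read, and with which flags, is up to
 * the caller, and vaddr/first/num_regs stand in for caller-specific values):
 *
 *      qcom_clear_read_regs(nandc);
 *      qcom_write_reg_dma(nandc, vaddr, first, num_regs, NAND_BAM_NEXT_SGL);
 *      qcom_read_reg_dma(nandc, first, num_regs, NAND_BAM_NEXT_SGL);
 *      ret = qcom_submit_descs(nandc);
 */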
/**
 * qcom_nandc_unalloc() - free qpic nand controller resources
 * @nandc: qpic nand controller
 *
 * This function will free the memory and DMA resources allocated for the
 * qpic nand controller.
 */
void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
        if (nandc->props->supports_bam) {
                if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
                        dma_unmap_single(nandc->dev, nandc->reg_read_dma,
                                         MAX_REG_RD * sizeof(*nandc->reg_read_buf),
                                         DMA_FROM_DEVICE);

                if (nandc->tx_chan)
                        dma_release_channel(nandc->tx_chan);

                if (nandc->rx_chan)
                        dma_release_channel(nandc->rx_chan);

                if (nandc->cmd_chan)
                        dma_release_channel(nandc->cmd_chan);
        } else {
                if (nandc->chan)
                        dma_release_channel(nandc->chan);
        }
}
EXPORT_SYMBOL(qcom_nandc_unalloc);
/**
 * qcom_nandc_alloc() - Allocate qpic nand controller
 * @nandc: qpic nand controller
 *
 * This function will allocate the memory and DMA resources for the qpic
 * nand controller.
 */
int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
        int ret;

        ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
        if (ret) {
                dev_err(nandc->dev, "failed to set DMA mask\n");
                return ret;
        }

        /*
         * we use the internal buffer for reading ONFI params, reading small
         * data like ID and status, and performing read-copy-write operations
         * when writing to a codeword partially. 532 is the maximum possible
         * size of a codeword for our nand controller
         */
        nandc->buf_size = 532;

        nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
        if (!nandc->data_buffer)
                return -ENOMEM;

        nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
        if (!nandc->regs)
                return -ENOMEM;

        nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
                                           sizeof(*nandc->reg_read_buf),
                                           GFP_KERNEL);
        if (!nandc->reg_read_buf)
                return -ENOMEM;
        if (nandc->props->supports_bam) {
                nandc->reg_read_dma =
                        dma_map_single(nandc->dev, nandc->reg_read_buf,
                                       MAX_REG_RD * sizeof(*nandc->reg_read_buf),
                                       DMA_FROM_DEVICE);
                if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
                        dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
                        return -EIO;
                }

                /*
                 * Initially allocate BAM transaction to read ONFI param page.
                 * After detecting all the devices, this BAM transaction will
                 * be freed and the next BAM transaction will be allocated with
                 * maximum codeword size
                 */
                nandc->max_cwperpage = 1;
                nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
                if (!nandc->bam_txn) {
                        dev_err(nandc->dev, "failed to allocate bam transaction\n");
                        ret = -ENOMEM;
                        goto unalloc;
                }
        } else {
                nandc->chan = dma_request_chan(nandc->dev, "rxtx");
                if (IS_ERR(nandc->chan)) {
                        ret = PTR_ERR(nandc->chan);
                        nandc->chan = NULL;
                        dev_err_probe(nandc->dev, ret, "rxtx DMA channel request failed\n");
                        return ret;
                }
        }

        return 0;

unalloc:
        qcom_nandc_unalloc(nandc);
        return ret;
}
EXPORT_SYMBOL(qcom_nandc_alloc);
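/*
 * Lifecycle note (an assumption about typical usage, not mandated by this
 * file): a controller driver fills in nandc->dev and nandc->props, calls
 * qcom_nandc_alloc() from its probe path, and pairs it with
 * qcom_nandc_unalloc() in its remove and probe-error paths.
 */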