/*
 * CLLR_LA / sizeof(struct stm32_dma3_hwdesc) represents the number of hdwdesc that can be addressed
 * by the pointer to the next linked-list data structure. The __aligned forces the 32-byte
 * alignment. So use hardcoded 32. Multiplied by the max block size of each item, it represents
 * the sg size limitation.
 */
#define STM32_DMA3_MAX_SEG_SIZE ((CLLR_LA / 32) * STM32_DMA3_MAX_BLOCK_SIZE)
/*
 * If the memory to be allocated for the number of hwdesc (6 u32 members but 32-bytes
 * aligned) is greater than the maximum address of CLLR_LA, then the last items can't be
 * addressed, so abort the allocation.
 * NOTE(review): fragment — the enclosing allocation function's prologue is outside this chunk.
 */
if ((count * 32) > CLLR_LA) {
	dev_err(chan2dev(chan), "Transfer is too big (> %luB)\n", STM32_DMA3_MAX_SEG_SIZE);
	return NULL;
}
/*
 * NOTE(review): fragment of a user-setting sanity checker — bndt/cllr/ctr1/sdw/ddw/
 * csar/cdar/sap/dap are declared in the (unseen) function prologue. Each check below
 * only logs; it does not abort, presumably because the HW raises a User Setting Error
 * interrupt for these cases — TODO confirm against the enclosing function.
 */
if (!bndt && !FIELD_GET(CLLR_UB1, cllr))
	dev_err(dev, "null source block size and no update of this value\n");
if (bndt % sdw)
	dev_err(dev, "source block size not multiple of src data width\n");
if (FIELD_GET(CTR1_PAM, ctr1) == CTR1_PAM_PACK_UNPACK && bndt % ddw)
	dev_err(dev, "(un)packing mode w/ src block size not multiple of dst data width\n");
if (csar % sdw)
	dev_err(dev, "unaligned source address not multiple of src data width\n");
if (cdar % ddw)
	dev_err(dev, "unaligned destination address not multiple of dst data width\n");
if (sdw == DMA_SLAVE_BUSWIDTH_8_BYTES && port_is_ahb(ddata->ports_max_dw[sap]))
	dev_err(dev, "double-word source data width not supported on port %u\n", sap);
if (ddw == DMA_SLAVE_BUSWIDTH_8_BYTES && port_is_ahb(ddata->ports_max_dw[dap]))
	dev_err(dev, "double-word destination data width not supported on port %u\n", dap);
}
/*
 * Make sure to flush the CPU's write buffers so that the descriptors are ready to be read
 * by DMA3. By explicitly using a write memory barrier here, instead of doing it with writel
 * to enable the channel, we avoid an unnecessary barrier in the case where the descriptors
 * are reused (DMA_CTRL_REUSE).
 */
if (is_last)
	dma_wmb();
}
/* len is a multiple of dw, so if len is < chan_max_burst, shorten burst */
if (len < chan_max_burst)
	max_burst = len / dw;

/*
 * HW doesn't modify the burst if burst size <= half of the fifo size.
 * If len is not a multiple of burst size, last burst is shortened by HW.
 * Take care of maximum burst supported on interconnect bus.
 */
return min_t(u32, max_burst, bus_max_burst);
}
/* Following conditions would raise User Setting Error interrupt */
if (!(dma_device.src_addr_widths & BIT(sdw)) || !(dma_device.dst_addr_widths & BIT(ddw))) {
	dev_err(chan2dev(chan), "Bus width (src=%u, dst=%u) not supported\n", sdw, ddw);
	return -EINVAL;
}

/* Port 1 can only be targeted when the controller actually has a second master port */
if (ddata->ports_max_dw[1] == DW_INVALID && (sap || dap)) {
	dev_err(chan2dev(chan), "Only one master port, port 1 is not supported\n");
	return -EINVAL;
}
/*
 * Double-word (8-byte) accesses are not supported through an AHB master port;
 * reject the configuration before programming the channel.
 * Fixed: the error message was missing the closing parenthesis after "dap=%u".
 */
sap_max_dw = ddata->ports_max_dw[sap];
dap_max_dw = ddata->ports_max_dw[dap];
if ((port_is_ahb(sap_max_dw) && sdw == DMA_SLAVE_BUSWIDTH_8_BYTES) ||
    (port_is_ahb(dap_max_dw) && ddw == DMA_SLAVE_BUSWIDTH_8_BYTES)) {
	dev_err(chan2dev(chan),
		"8 bytes buswidth (src=%u, dst=%u) not supported on port (sap=%u, dap=%u)\n",
		sdw, ddw, sap, dap);
	return -EINVAL;
}
/* Source increment / allocated port / AXI burst limit, from the DT transfer config */
if (FIELD_GET(STM32_DMA3_DT_SINC, tr_conf))
	_ctr1 |= CTR1_SINC;
if (sap)
	_ctr1 |= CTR1_SAP;
if (port_is_axi(sap_max_dw)) /* AXI - apply axi maximum burst limitation */
	src_max_burst = ddata->axi_max_burst_len;

/* Same for the destination side */
if (FIELD_GET(STM32_DMA3_DT_DINC, tr_conf))
	_ctr1 |= CTR1_DINC;
if (dap)
	_ctr1 |= CTR1_DAP;
if (port_is_axi(dap_max_dw)) /* AXI - apply axi maximum burst limitation */
	dst_max_burst = ddata->axi_max_burst_len;

/* Store TCEM to know on which event TC flag occurred */
chan->tcem = tcem;
/* Store direction for residue computation */
chan->dma_config.direction = dir;
switch (dir) {
case DMA_MEM_TO_DEV:
	/* Set destination (device) data width and burst */
	ddw = min_t(u32, ddw, stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw,
						    len, dst_addr));
	dbl_max = min_t(u32, dbl_max, stm32_dma3_get_max_burst(len, ddw, chan->max_burst,
							       dst_max_burst));

	/* Set source (memory) data width and burst */
	sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
	sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst);
	/* When packing is disabled in DT, mirror the device side on the memory side */
	if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf)) {
		sdw = ddw;
		sbl_max = dbl_max;
	}
if (ddw != sdw) {
	_ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK);
	/* Should never reach this case as ddw is clamped down */
	if (len & (ddw - 1)) {
		/* Fixed: message lacked its trailing newline (cf. the DEV_TO_MEM branch) */
		dev_err(chan2dev(chan), "Packing mode is enabled and len is not multiple of ddw\n");
		return -EINVAL;
	}
}
/* dst = dev */
_ctr2 |= CTR2_DREQ;
break;

case DMA_DEV_TO_MEM:
	/* Set source (device) data width and burst */
	sdw = min_t(u32, sdw, stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw,
						    len, src_addr));
	sbl_max = min_t(u32, sbl_max, stm32_dma3_get_max_burst(len, sdw, chan->max_burst,
							       src_max_burst));

	/* Set destination (memory) data width and burst */
	ddw = stm32_dma3_get_max_dw(chan->max_burst, dap_max_dw, len, dst_addr);
	dbl_max = stm32_dma3_get_max_burst(len, ddw, chan->max_burst, dst_max_burst);
	if (!!FIELD_GET(STM32_DMA3_DT_NOPACK, tr_conf) ||
	    ((_ctr2 & CTR2_PFREQ) && ddw > sdw)) { /* Packing to wider ddw not supported */
		ddw = sdw;
		dbl_max = sbl_max;
	}
	if (ddw != sdw) {
		_ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK);
		/* Should never reach this case as ddw is clamped down */
		if (len & (ddw - 1)) {
			dev_err(chan2dev(chan), "Packing mode is enabled and len is not multiple of ddw\n");
			return -EINVAL;
		}
	}
	/* dst = mem */
	_ctr2 &= ~CTR2_DREQ;
	break;
case DMA_MEM_TO_MEM:
	/* Set source (memory) data width and burst */
	init_dw = sdw;
	init_bl_max = sbl_max;
	sdw = stm32_dma3_get_max_dw(chan->max_burst, sap_max_dw, len, src_addr);
	sbl_max = stm32_dma3_get_max_burst(len, sdw, chan->max_burst, src_max_burst);
	/* If a slave config was applied, don't exceed the user-requested width/burst */
	if (chan->config_set & STM32_DMA3_CFG_SET_DMA) {
		sdw = min_t(u32, init_dw, sdw);
		sbl_max = min_t(u32, init_bl_max, stm32_dma3_get_max_burst(len, sdw,
									   chan->max_burst,
									   src_max_burst));
	}
if (ddw != sdw) {
	_ctr1 |= FIELD_PREP(CTR1_PAM, CTR1_PAM_PACK_UNPACK);
	/* Should never reach this case as ddw is clamped down */
	if (len & (ddw - 1)) {
		/* Fixed: message lacked its trailing newline (cf. the DEV_TO_MEM branch) */
		dev_err(chan2dev(chan), "Packing mode is enabled and len is not multiple of ddw\n");
		return -EINVAL;
	}
}
	/* CTR2_REQSEL/DREQ/BREQ/PFREQ are ignored with CTR2_SWREQ=1 */
	_ctr2 |= CTR2_SWREQ;
	break;

default:
	dev_err(chan2dev(chan), "Direction %s not supported\n",
		dmaengine_get_direction_text(dir));
	return -EINVAL;
}
/* If cllr is null, it means it is either the last or single item */
if (!cllr)
	return swdesc->lli_size - 1;

/* In cyclic mode, go fast and first check we are not on the last item */
if (swdesc->cyclic && next_lli_offset == (swdesc->lli[0].hwdesc_addr & CLLR_LA))
	return swdesc->lli_size - 1;

/* As transfer is in progress, look backward from the last item */
for (i = swdesc->lli_size - 1; i > 0; i--) {
	/* Accumulate the block size of every item still pending after the current one */
	*residue += FIELD_GET(CBR1_BNDT, swdesc->lli[i].hwdesc->cbr1);
	lli_offset = swdesc->lli[i].hwdesc_addr & CLLR_LA;
	if (lli_offset == next_lli_offset)
		return i - 1;
}
/* If channel is still active (CSR_IDLEF is not set), can't get a reliable residue */
if (!(csr & CSR_IDLEF))
	dev_warn(dev, "Can't get residue: channel still active, csr=%08x\n", csr);

/*
 * If channel is not suspended, but Idle and Transfer Complete are set,
 * linked-list is over, no residue
 */
if (!(csr & CSR_SUSPF) && (csr & CSR_TCF) && (csr & CSR_IDLEF))
	return;

/* Read registers to have a snapshot */
cllr = readl_relaxed(ddata->base + STM32_DMA3_CLLR(chan->id));
cbr1 = readl_relaxed(ddata->base + STM32_DMA3_CBR1(chan->id));
cdar = readl_relaxed(ddata->base + STM32_DMA3_CDAR(chan->id));

/* Resume current transfer */
if (csr & CSR_SUSPF) {
	writel_relaxed(CFCR_SUSPF, ddata->base + STM32_DMA3_CFCR(chan->id));
	writel_relaxed(swdesc->ccr, ddata->base + STM32_DMA3_CCR(chan->id));
}

/* Get current hwdesc and cumulate residue of pending hwdesc BNDT */
ret = stm32_dma3_chan_get_curr_hwdesc(swdesc, cllr, &residue);
if (ret < 0) {
	dev_err(chan2dev(chan), "Can't get residue: current hwdesc not found\n");
	return;
}
curr_lli = ret;

/* Read current FIFO level - in units of programmed destination data width */
hwdesc = swdesc->lli[curr_lli].hwdesc;
fifol = FIELD_GET(CSR_FIFOL, csr) * (1 << FIELD_GET(CTR1_DDW_LOG2, hwdesc->ctr1));
/* If the FIFO contains as many bytes as its size, it can't contain more */
if (fifol == (1 << (chan->fifo_size + 1)))
	goto skip_fifol_update;

/*
 * In case of PACKING (Destination burst length > Source burst length) or UNPACKING
 * (Source burst length > Destination burst length), bytes could be pending in the FIFO
 * (to be packed up to Destination burst length or unpacked into Destination burst length
 * chunks).
 * BNDT is not reliable, as it reflects the number of bytes read from the source but not the
 * number of bytes written to the destination.
 * FIFOL is also not sufficient, because it reflects the number of available write beats in
 * units of Destination data width but not the bytes not yet packed or unpacked.
 * In case of Destination increment DINC, it is possible to compute the number of bytes in
 * the FIFO:
 * fifol_in_bytes = bytes_read - bytes_written.
 */
pack_unpack = !!(FIELD_GET(CTR1_PAM, hwdesc->ctr1) == CTR1_PAM_PACK_UNPACK);
if (pack_unpack && (hwdesc->ctr1 & CTR1_DINC)) {
	int bytes_read = FIELD_GET(CBR1_BNDT, hwdesc->cbr1) - bndt;
	int bytes_written = cdar - hwdesc->cdar;

	if (bytes_read > 0)
		fifol = bytes_read - bytes_written;
}

skip_fifol_update:
if (fifol) {
	dev_dbg(chan2dev(chan), "%u byte(s) in the FIFO\n", fifol);
	dma_set_in_flight_bytes(txstate, fifol);
	/*
	 * Residue is already accurate for DMA_MEM_TO_DEV as BNDT reflects data read from
	 * the source memory buffer, so just need to add fifol to residue in case of
	 * DMA_DEV_TO_MEM transfer because these bytes are not yet written in destination
	 * memory buffer.
	 */
	if (chan->dma_config.direction == DMA_DEV_TO_MEM)
		residue += fifol;
}

dma_set_residue(txstate, residue);
}
staticint stm32_dma3_chan_stop(struct stm32_dma3_chan *chan)
{ struct stm32_dma3_ddata *ddata = to_stm32_dma3_ddata(chan);
u32 ccr; int ret = 0;
if (!(ccr & CCR_SUSP) && (ccr & CCR_EN)) { /* Suspend the channel */
ret = stm32_dma3_chan_suspend(chan, true); if (ret)
dev_warn(chan2dev(chan), "%s: timeout, data might be lost\n", __func__);
}
/* * Reset the channel: this causes the reset of the FIFO and the reset of the channel * internal state, the reset of CCR_EN and CCR_SUSP bits.
*/
stm32_dma3_chan_reset(chan);
return ret;
}
/*
 * Complete the channel's current transfer; no-op when no software descriptor
 * is in flight. (Fixed: "staticvoid" was missing a space — compile error.)
 */
static void stm32_dma3_chan_complete(struct stm32_dma3_chan *chan)
{
	if (!chan->swdesc)
		return;
/*
 * NOTE(review): fragment — csr/ccr/ddata are declared in an enclosing function
 * outside this chunk; this looks like interrupt status/error handling — confirm.
 */
if (csr & CSR_ULEF && ccr & CCR_ULEIE) {
	dev_err(chan2dev(chan), "Update link transfer error\n");
	chan->dma_status = DMA_ERROR;
	/* CCR.EN automatically cleared by HW */
	stm32_dma3_chan_reset(chan);
}

if (csr & CSR_DTEF && ccr & CCR_DTEIE) {
	dev_err(chan2dev(chan), "Data transfer error\n");
	chan->dma_status = DMA_ERROR;
	/* CCR.EN automatically cleared by HW */
	stm32_dma3_chan_reset(chan);
}

/*
 * Half Transfer Interrupt may be disabled but Half Transfer Flag can be set,
 * ensure HTF flag to be cleared, with other flags.
 */
csr &= (ccr | CCR_HTIE);

if (csr)
	writel_relaxed(csr, ddata->base + STM32_DMA3_CFCR(chan->id));
/* Power up the controller; bail out if runtime resume fails */
ret = pm_runtime_resume_and_get(ddata->dma_dev.dev);
if (ret < 0)
	return ret;

/* Ensure the channel is free */
if (chan->semaphore_mode &&
    readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(chan->id)) & CSEMCR_SEM_MUTEX) {
	ret = -EBUSY;
	goto err_put_sync;
}

/* Pool for the 32-byte-aligned linked-list hardware descriptors */
chan->lli_pool = dmam_pool_create(dev_name(&c->dev->device), c->device->dev, sizeof(struct stm32_dma3_hwdesc),
				  __alignof__(struct stm32_dma3_hwdesc), SZ_64K);
if (!chan->lli_pool) {
	dev_err(chan2dev(chan), "Failed to create LLI pool\n");
	ret = -ENOMEM;
	goto err_put_sync;
}

/* Take the channel semaphore */
if (chan->semaphore_mode) {
	writel_relaxed(CSEMCR_SEM_MUTEX, ddata->base + STM32_DMA3_CSEMCR(id));
	csemcr = readl_relaxed(ddata->base + STM32_DMA3_CSEMCR(id));
	ccid = FIELD_GET(CSEMCR_SEM_CCID, csemcr);
	/* Check that the channel is well taken */
	if (ccid != CCIDCFGR_CID1) {
		dev_err(chan2dev(chan), "Not under CID1 control (in-use by CID%d)\n", ccid);
		ret = -EPERM;
		goto err_pool_destroy;
	}
	dev_dbg(chan2dev(chan), "Under CID1 control (semcr=0x%08x)\n", csemcr);
}
/*
 * NOTE(review): fragment computing the number of linked-list items needed for a
 * transfer of `len` bytes; the meaning of `prevent_refactor` is not visible in
 * this chunk — confirm against the enclosing function.
 */
if (prevent_refactor)
	return DIV_ROUND_UP(len, STM32_DMA3_MAX_BLOCK_SIZE);

/* One item per full max-size block, then handle the remainder */
count = len / STM32_DMA3_MAX_BLOCK_SIZE;
len -= (len / STM32_DMA3_MAX_BLOCK_SIZE) * STM32_DMA3_MAX_BLOCK_SIZE;

if (len >= chan->max_burst) {
	count += 1;
	/* len < STM32_DMA3_MAX_BLOCK_SIZE here, so it fits in one item */
	len -= (len / chan->max_burst) * chan->max_burst;
}

/* Unaligned remainder fits in one extra item */
if (len > 0)
	count += 1;
/* Get residue/in_flight_bytes only if a transfer is currently running (swdesc != NULL) */
if (swdesc)
	stm32_dma3_chan_set_residue(chan, swdesc, txstate);
/* Request dma channel among the generic dma controller list */
c = dma_request_channel(mask, stm32_dma3_filter_fn, &conf);
if (!c) {
	dev_err(ddata->dma_dev.dev, "No suitable channel found\n");
	return NULL;
}
/*
 * CID filtering must be configured to ensure that the DMA3 channel will inherit the CID of
 * the processor which is configuring and using the given channel.
 * In case CID filtering is not configured, dma-channel-mask property can be used to
 * specify available DMA channels to the kernel.
 */
of_property_read_u32(ddata->dma_dev.dev->of_node, "dma-channel-mask", &mask);

/* Reserve !CID-filtered not in dma-channel-mask, static CID != CID1, CID1 not allowed */
for (i = 0; i < ddata->dma_channels; i++) {
	ccidcfgr = readl_relaxed(ddata->base + STM32_DMA3_CCIDCFGR(i));
/* Map registers, grab clock and optional reset, then bring the block out of reset */
ddata->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddata->base))
	return PTR_ERR(ddata->base);

ddata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ddata->clk))
	return dev_err_probe(&pdev->dev, PTR_ERR(ddata->clk), "Failed to get clk\n");

reset = devm_reset_control_get_optional(&pdev->dev, NULL);
if (IS_ERR(reset))
	return dev_err_probe(&pdev->dev, PTR_ERR(reset), "Failed to get reset\n");

ret = clk_prepare_enable(ddata->clk);
if (ret)
	return dev_err_probe(&pdev->dev, ret, "Failed to enable clk\n");

reset_control_reset(reset);

INIT_LIST_HEAD(&dma_dev->channels);

/* Advertise the dmaengine capabilities this controller provides */
dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);

dma_dev->dev = &pdev->dev;
/*
 * This controller supports up to 8-byte buswidth depending on the port used and the
 * channel, and can only access address at even boundaries, multiple of the buswidth.
 */
dma_dev->copy_align = DMAENGINE_ALIGN_8_BYTES;
dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			   BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			   BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			   BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			   BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			   BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | BIT(DMA_MEM_TO_MEM);
/* if dma_channels is not modified, get it from hwcfgr1 */
if (of_property_read_u32(np, "dma-channels", &ddata->dma_channels)) {
	hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR1);
	ddata->dma_channels = FIELD_GET(G_NUM_CHANNELS, hwcfgr);
}

/* if dma_requests is not modified, get it from hwcfgr2 */
if (of_property_read_u32(np, "dma-requests", &ddata->dma_requests)) {
	hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR2);
	ddata->dma_requests = FIELD_GET(G_MAX_REQ_ID, hwcfgr) + 1;
}

/* Per-port maximum data width, decoded from the global HW configuration */
ddata->ports_max_dw[0] = FIELD_GET(G_M0_DATA_WIDTH_ENC, hwcfgr);
if (master_ports == AXI64 || master_ports == AHB32) /* Single master port */
	ddata->ports_max_dw[1] = DW_INVALID;
else /* Dual master ports */
	ddata->ports_max_dw[1] = FIELD_GET(G_M1_DATA_WIDTH_ENC, hwcfgr);

/* axi_max_burst_len is optional, if not defined, use STM32_DMA3_MAX_BURST_LEN */
ddata->axi_max_burst_len = STM32_DMA3_MAX_BURST_LEN;
pdata = device_get_match_data(&pdev->dev);
if (pdata && pdata->axi_max_burst_len) {
	ddata->axi_max_burst_len = min_t(u32, pdata->axi_max_burst_len,
					 STM32_DMA3_MAX_BURST_LEN);
	dev_dbg(&pdev->dev, "Burst is limited to %u beats through AXI port\n",
		ddata->axi_max_burst_len);
}
/* Allocate the per-channel array */
ddata->chans = devm_kcalloc(&pdev->dev, ddata->dma_channels, sizeof(*ddata->chans),
			    GFP_KERNEL);
if (!ddata->chans) {
	ret = -ENOMEM;
	goto err_clk_disable;
}

/* If every channel is reserved by RIF, nothing is usable — abort */
chan_reserved = stm32_dma3_check_rif(ddata);
if (chan_reserved == GENMASK(ddata->dma_channels - 1, 0)) {
	ret = -ENODEV;
	dev_err_probe(&pdev->dev, ret, "No channel available, abort registration\n");
	goto err_clk_disable;
}

/* G_FIFO_SIZE x=0..7 in HWCFGR3 and G_FIFO_SIZE x=8..15 in HWCFGR4 */
hwcfgr = readl_relaxed(ddata->base + STM32_DMA3_HWCFGR3);
hwcfgr |= ((u64)readl_relaxed(ddata->base + STM32_DMA3_HWCFGR4)) << 32;

for (i = 0; i < ddata->dma_channels; i++) {
	if (chan_reserved & BIT(i))
		continue;

	chan = &ddata->chans[i];
	chan->id = i;
	chan->fifo_size = get_chan_hwcfg(i, G_FIFO_SIZE(i), hwcfgr);
	/* If chan->fifo_size > 0 then half of the fifo size, else no burst when no FIFO */
	chan->max_burst = (chan->fifo_size) ? (1 << (chan->fifo_size + 1)) / 2 : 0;
}

ret = dmaenginem_async_device_register(dma_dev);
if (ret)
	goto err_clk_disable;

for (i = 0; i < ddata->dma_channels; i++) {
	char name[12];
/*
 * NOTE(review): the following German website-disclaimer text is extraneous
 * boilerplate that leaked into this source file (it is not part of the driver);
 * it is preserved here, commented out, pending removal:
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell."
 */