// SPDX-License-Identifier: GPL-2.0
//
// Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
//
// Copyright (c) 2022 Chuanhong Guo <gch981213@gmail.com>
//
// This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
//
// Copyright (C) 2020 MediaTek Inc.
// Author: Weijie Gao <weijie.gao@mediatek.com>
//
// This controller organizes the page data as several interleaved sectors
// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
// +---------+------+------+---------+------+------+-----+
// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
// +---------+------+------+---------+------+------+-----+
// With auto-format turned on, DMA only returns this part:
// +---------+---------+-----+
// | Sector1 | Sector2 | ... |
// +---------+---------+-----+
// The FDM data will be filled to the registers, and ECC parity data isn't
// accessible.
// With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
// in its original order shown in the first table. ECC can't be turned on when
// auto-format is off.
//
// However, Linux SPI-NAND driver expects the data returned as:
// +------+-----+
// | Page | OOB |
// +------+-----+
// where the page data is continuously stored instead of interleaved.
// So we assume all instructions matching the page_op template between ECC
// prepare_io_req and finish_io_req are for page cache r/w.
// Here's how this spi-mem driver operates when reading:
// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
// 2. Perform page ops and let the controller fill the DMA bounce buffer with
//    de-interleaved sector data and set FDM registers.
// 3. Return the data as:
//    +---------+---------+-----+------+------+-----+
//    | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
//    +---------+---------+-----+------+------+-----+
// 4.
//    For other matching spi_mem ops outside a prepare/finish_io_req pair,
//    read the data with auto-format off into the bounce buffer and copy
//    needed data to the buffer specified in the request.
//
// Write requests operate in a similar manner.
// As a limitation of this strategy, we won't be able to access any ECC parity
// data at all in Linux.
//
// Here's the bad block mark situation on MTK chips:
// In older chips like mt7622, MTK uses the first FDM byte in the first sector
// as the bad block mark. After de-interleaving, this byte appears at [pagesize]
// in the returned data, which is the BBM position expected by kernel. However,
// the conventional bad block mark is the first byte of the OOB, which is part
// of the last sector data in the interleaved layout. Instead of fixing their
// hardware, MTK decided to address this inconsistency in software. On these
// later chips, the BootROM expects the following:
// 1. The [pagesize] byte on a nand page is used as BBM, which will appear at
//    (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
// 2. The original byte stored at that position in the DMA buffer will be stored
//    as the first byte of the FDM section in the last sector.
// We can't disagree with the BootROM, so after de-interleaving, we need to
// perform the following swaps in read:
// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
//    which is the expected BBM position by kernel.
// 2. Store the page data byte at [pagesize + (nsectors-1) * fdm] back to
//    [page_size - (nsectors - 1) * spare_size]
// Similarly, when writing, we need to perform swaps in the other direction.
ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
!(val & snf->caps->mastersta_mask), 0,
SNFI_POLL_INTERVAL); if (ret) {
dev_err(snf->dev, "NFI master is still busy after reset\n"); return ret;
}
ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
!(val & (NFI_FSM | snf->caps->nandfsm_mask)), 0,
SNFI_POLL_INTERVAL); if (ret) {
dev_err(snf->dev, "Failed to reset NFI\n"); return ret;
}
fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
!(val & fifo_mask), 0, SNFI_POLL_INTERVAL); if (ret) {
dev_err(snf->dev, "NFI FIFOs are not empty\n"); return ret;
}
return 0;
}
staticint mtk_snand_mac_reset(struct mtk_snand *snf)
{ int ret;
u32 val;
nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);
ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
!(val & SPI_STATE), 0, SNFI_POLL_INTERVAL); if (ret)
dev_err(snf->dev, "Failed to reset SNFI MAC\n");
ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
val & WIP_READY, 0, SNFI_POLL_INTERVAL); if (ret) {
dev_err(snf->dev, "Timed out waiting for WIP_READY\n"); goto cleanup;
}
ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
0, SNFI_POLL_INTERVAL); if (ret)
dev_err(snf->dev, "Timed out waiting for WIP cleared\n");
switch (page_size) { case SZ_512:
pagesize_idx = NFI_PAGE_SIZE_512_2K; break; case SZ_2K: if (snf->caps->sector_size == 512)
pagesize_idx = NFI_PAGE_SIZE_2K_4K; else
pagesize_idx = NFI_PAGE_SIZE_512_2K; break; case SZ_4K: if (snf->caps->sector_size == 512)
pagesize_idx = NFI_PAGE_SIZE_4K_8K; else
pagesize_idx = NFI_PAGE_SIZE_2K_4K; break; case SZ_8K: if (snf->caps->sector_size == 512)
pagesize_idx = NFI_PAGE_SIZE_8K_16K; else
pagesize_idx = NFI_PAGE_SIZE_4K_8K; break; case SZ_16K:
pagesize_idx = NFI_PAGE_SIZE_8K_16K; break; default:
dev_err(snf->dev, "unsupported page size.\n"); goto err;
}
spare_size = oob_size / nsectors; // If we're using the 1KB sector size, HW will automatically double the // spare size. We should only use half of the value in this case. if (snf->caps->sector_size == 1024)
spare_size /= 2;
for (i = snf->caps->num_spare_size - 1; i >= 0; i--) { if (snf->caps->spare_sizes[i] <= spare_size) {
spare_size = snf->caps->spare_sizes[i]; if (snf->caps->sector_size == 1024)
spare_size *= 2;
spare_idx = i; break;
}
}
// calculate the max possible strength under current page format
parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits;
mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);
// if there's a user requested strength, find the minimum strength that // meets the requirement. Otherwise use the maximum strength which is // expected by BootROM. if (ecc_user && strength) {
u32 s_next = ecc_cfg->strength - 1;
while (1) {
mtk_ecc_adjust_strength(snf->ecc, &s_next); if (s_next >= ecc_cfg->strength) break; if (s_next < strength) break;
s_next = ecc_cfg->strength - 1;
}
}
if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1) return;
// swap [pagesize] byte on nand with the first fdm byte // in the last sector.
buf_bbm_pos = snf->nfi_cfg.page_size -
(snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
fdm_bbm_pos = snf->nfi_cfg.page_size +
(snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1) return;
// swap the first fdm byte in the first and the last sector.
fdm_bbm_pos1 = snf->nfi_cfg.page_size;
fdm_bbm_pos2 = snf->nfi_cfg.page_size +
(snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
}
staticint mtk_snand_read_page_cache(struct mtk_snand *snf, conststruct spi_mem_op *op)
{
u8 *buf = snf->buf;
u8 *buf_fdm = buf + snf->nfi_cfg.page_size; // the address part to be sent by the controller
u32 op_addr = op->addr.val; // where to start copying data from bounce buffer
u32 rd_offset = 0;
u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth);
u32 op_mode = 0;
u32 dma_len = snf->buf_len; int ret = 0;
u32 rd_mode, rd_bytes, val;
dma_addr_t buf_dma;
if (snf->autofmt) {
u32 last_bit;
u32 mask;
dma_len = snf->nfi_cfg.page_size;
op_mode = CNFG_AUTO_FMT_EN; if (op->data.ecc)
op_mode |= CNFG_HW_ECC_EN; // extract the plane bit: // Find the highest bit set in (pagesize+oobsize). // Bits higher than that in op->addr are kept and sent over SPI // Lower bits are used as an offset for copying data from DMA // bounce buffer.
last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
mask = (1 << last_bit) - 1;
rd_offset = op_addr & mask;
op_addr &= ~mask;
// check if we can dma to the caller memory if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
buf = op->data.buf.in;
}
mtk_snand_mac_reset(snf);
mtk_nfi_reset(snf);
if (!wait_for_completion_timeout(
&snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
dev_err(snf->dev, "DMA timed out for reading from cache.\n");
ret = -ETIMEDOUT; goto cleanup;
}
// Wait for BUS_SEC_CNTR returning expected value
ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
SNFI_POLL_INTERVAL); if (ret) {
dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n"); goto cleanup2;
}
// Wait for bus becoming idle
ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
!(val & snf->caps->mastersta_mask), 0,
SNFI_POLL_INTERVAL); if (ret) {
dev_err(snf->dev, "Timed out waiting for bus becoming idle\n"); goto cleanup2;
}
if (op->data.ecc) {
ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE); if (ret) {
dev_err(snf->dev, "wait ecc done timeout\n"); goto cleanup2;
} // save status before disabling ecc
mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
snf->nfi_cfg.nsectors);
}
staticint mtk_snand_write_page_cache(struct mtk_snand *snf, conststruct spi_mem_op *op)
{ // the address part to be sent by the controller
u32 op_addr = op->addr.val; // where to start copying data from bounce buffer
u32 wr_offset = 0;
u32 op_mode = 0; int ret = 0;
u32 wr_mode = 0;
u32 dma_len = snf->buf_len;
u32 wr_bytes, val;
size_t cap_len;
dma_addr_t buf_dma;
if (!wait_for_completion_timeout(
&snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
dev_err(snf->dev, "DMA timed out for program load.\n");
ret = -ETIMEDOUT; goto cleanup_ecc;
}
// Wait for NFI_SEC_CNTR returning expected value
ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
SNFI_POLL_INTERVAL); if (ret)
dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
/** * mtk_snand_is_page_ops() - check if the op is a controller supported page op. * @op: spi-mem op to check * * Check whether op can be executed with read_from_cache or program_load * mode in the controller. * This controller can execute typical Read From Cache and Program Load * instructions found on SPI-NAND with 2-byte address. * DTR and cmd buswidth & nbytes should be checked before calling this. * * Return: true if the op matches the instruction template
*/ staticbool mtk_snand_is_page_ops(conststruct spi_mem_op *op)
{ if (op->addr.nbytes != 2) returnfalse;
staticint mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{ struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->controller); // page ops transfer size must be exactly ((sector_size + spare_size) * // nsectors). Limit the op size if the caller requests more than that. // exec_op will read more than needed and discard the leftover if the // caller requests less data. if (mtk_snand_is_page_ops(op)) {
size_t l; // skip adjust_op_size for page ops if (ms->autofmt) return 0;
l = ms->caps->sector_size + ms->nfi_cfg.spare_size;
l *= ms->nfi_cfg.nsectors; if (op->data.nbytes > l)
op->data.nbytes = l;
} else {
size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
ms->nfi_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ms->nfi_base)) {
ret = PTR_ERR(ms->nfi_base); goto release_ecc;
}
ms->dev = &pdev->dev;
ms->nfi_clk = devm_clk_get_enabled(&pdev->dev, "nfi_clk"); if (IS_ERR(ms->nfi_clk)) {
ret = PTR_ERR(ms->nfi_clk);
dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret); goto release_ecc;
}
ms->pad_clk = devm_clk_get_enabled(&pdev->dev, "pad_clk"); if (IS_ERR(ms->pad_clk)) {
ret = PTR_ERR(ms->pad_clk);
dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret); goto release_ecc;
}
ms->nfi_hclk = devm_clk_get_optional_enabled(&pdev->dev, "nfi_hclk"); if (IS_ERR(ms->nfi_hclk)) {
ret = PTR_ERR(ms->nfi_hclk);
dev_err(&pdev->dev, "unable to get nfi_hclk, err = %d\n", ret); goto release_ecc;
}
init_completion(&ms->op_done);
ms->irq = platform_get_irq(pdev, 0); if (ms->irq < 0) {
ret = ms->irq; goto release_ecc;
}
ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0, "mtk-snand", ms); if (ret) {
dev_err(ms->dev, "failed to request snfi irq\n"); goto release_ecc;
}
ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32)); if (ret) {
dev_err(ms->dev, "failed to set dma mask\n"); goto release_ecc;
}
// switch to SNFI mode
nfi_write32(ms, SNF_CFG, SPI_MODE);
ret = of_property_read_u32(np, "rx-sample-delay-ns", &val); if (!ret)
nfi_rmw32(ms, SNF_DLY_CTL3, SFCK_SAM_DLY,
val * SFCK_SAM_DLY_RANGE / SFCK_SAM_DLY_TOTAL);
ret = of_property_read_u32(np, "mediatek,rx-latch-latency-ns", &val); if (!ret) {
spi_freq = clk_get_rate(ms->pad_clk);
val = DIV_ROUND_CLOSEST(val, NSEC_PER_SEC / spi_freq);
nfi_rmw32(ms, SNF_MISC_CTL, DATA_READ_LATCH_LAT,
val << DATA_READ_LATCH_LAT_S);
}
// setup an initial page format for ops matching page_cache_op template // before ECC is called.
ret = mtk_snand_setup_pagefmt(ms, SZ_2K, SZ_64); if (ret) {
dev_err(ms->dev, "failed to set initial page format\n"); goto release_ecc;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.