// SPDX-License-Identifier: GPL-2.0 /* * Common Flash Interface support: * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002) * * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp> * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com> * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com> * * 2_by_8 routines added by Simon Munton * * 4_by_16 work by Carolyn J. Smith * * XIP support hooks by Vitaly Wool (based on code for Intel flash * by Nicolas Pitre) * * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0 * * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
*/
/* * Use status register to poll for Erase/write completion when DQ is not * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in * CFI Primary Vendor-Specific Extended Query table 1.5
*/ staticint cfi_use_status_reg(struct cfi_private *cfi)
{ struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
/* The error bits are invalid while the chip's busy */ if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB))) return 0;
if (map_word_bitsset(map, status, CMD(0x3a))) { unsignedlong chipstatus = MERGESTATUS(status);
if (chipstatus & CFI_SR_ESB)
pr_err("%s erase operation failed, status %lx\n",
map->name, chipstatus); if (chipstatus & CFI_SR_PSB)
pr_err("%s program operation failed, status %lx\n",
map->name, chipstatus); if (chipstatus & CFI_SR_WBASB)
pr_err("%s buffer program command aborted, status %lx\n",
map->name, chipstatus); if (chipstatus & CFI_SR_SLSB)
pr_err("%s sector write protected, status %lx\n",
map->name, chipstatus);
/* Erase/Program status bits are set on the operation failure */ if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB)) return 1;
} return 0;
}
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES staticvoid cfi_tell_features(struct cfi_pri_amdstd *extp)
{ constchar* erase_suspend[3] = { "Not supported", "Read only", "Read/write"
}; constchar* top_bottom[6] = { "No WP", "8x8KiB sectors at top & bottom, no WP", "Bottom boot", "Top boot", "Uniform, Bottom WP", "Uniform, Top WP"
};
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	/* Only CFI versions before 1.1 have an untrustworthy bootloc field. */
	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			 map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
		    /* Macronix added CFI to their 2nd generation
		     * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
		     * Fujitsu, Spansion, EON, ESI and older Macronix)
		     * has CFI.
		     *
		     * Therefore also check the manufacturer.
		     * This reduces the risk of false detection due to
		     * the 8-bit device ID.
		     */
		    (cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				 " detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			 " deduced %s from Device ID\n", map->name, major, minor,
			 extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
/* * These flashes report two separate eraseblock regions based on the * sector_erase-size and block_erase-size, although they both operate on the * same memory. This is not allowed according to CFI, so we just pick the * sector_erase-size.
*/
cfi->cfiq->NumEraseRegions = 1;
}
/* * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where * it should report a size of 8KBytes (0x0020*256).
*/
cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
mtd->name);
}
/* * S29NS512P flash uses more than 8bits to report number of sectors, * which is not permitted by CFI.
*/
cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
mtd->name);
}
staticstruct cfi_fixup fixup_table[] = { /* The CFI vendor ids and the JEDEC vendor IDs appear * to be common. It is like the devices id's are as * well. This table is to pick all cases where * we know that is the case.
*/
{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
{ 0, 0, NULL }
};
/* * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20: * Some revisions of the M29EW suffer from erase suspend hang ups. In * particular, it can occur when the sequence * Erase Confirm -> Suspend -> Program -> Resume * causes a lockup due to internal timing issues. The consequence is that the * erase cannot be resumed without inserting a dummy command after programming * and prior to resuming. [...] The work-around is to issue a dummy write cycle * that writes an F0 command code before the RESUME command.
*/ staticvoid cfi_fixup_m29ew_erase_suspend(struct map_info *map, unsignedlong adr)
{ struct cfi_private *cfi = map->fldrv_priv; /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */ if (is_m29ew(cfi))
map_write(map, CMD(0xF0), adr);
}
/*
 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
 *
 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
 * command is issued after an ERASE RESUME operation without waiting for a
 * minimum delay. The result is that once the ERASE seems to be completed
 * (no bits are toggling), the contents of the Flash memory block on which
 * the erase was ongoing could be inconsistent with the expected values
 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
 * values), causing a consequent failure of the ERASE operation.
 * The occurrence of this issue could be high, especially when file system
 * operations on the Flash are intensive. As a result, it is recommended
 * that a patch be applied. Intensive file system operations can cause many
 * calls to the garbage routine to free Flash space (also by erasing physical
 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
 * commands can occur. The problem disappears when a delay is inserted after
 * the RESUME command by using the udelay() function available in Linux.
 * The DELAY value must be tuned based on the customer's platform.
 * The maximum value that fixes the problem in all cases is 500us.
 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
 * in most cases.
 * We have chosen 500µs because this latency is acceptable.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}
extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu"); if (extp) { /* * It's a real CFI chip, not one for which the probe * routine faked a CFI structure.
*/
cfi_fixup_major_minor(cfi, extp);
for (i=0; i< cfi->numchips; i++) {
cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; /* * First calculate the timeout max according to timeout field * of struct cfi_ident that probed from chip's CFI aera, if * available. Specify a minimum of 2000us, in case the CFI data * is wrong.
*/ if (cfi->cfiq->BufWriteTimeoutTyp &&
cfi->cfiq->BufWriteTimeoutMax)
cfi->chips[i].buffer_write_time_max =
1 << (cfi->cfiq->BufWriteTimeoutTyp +
cfi->cfiq->BufWriteTimeoutMax); else
cfi->chips[i].buffer_write_time_max = 0;
/* * Return true if the chip is ready and has the correct value. * * Ready is one of: read mode, query mode, erase-suspend-read mode (in any * non-suspended sector) and is indicated by no toggle bits toggling. * * Error are indicated by toggling bits or bits held with the wrong value, * or with bits toggling. * * Note that anything more complicated than checking if no bits are toggling * (including checking DQ5 for an error status) is tricky to get working * correctly and is therefore not done (particularly with interleaved chips * as each chip must be checked independently of the others).
*/ staticint __xipram chip_ready(struct map_info *map, struct flchip *chip, unsignedlong addr, map_word *expected)
{ struct cfi_private *cfi = map->fldrv_priv;
map_word oldd, curd; int ret;
if (cfi_use_status_reg(cfi)) {
map_word ready = CMD(CFI_SR_DRB); /* * For chips that support status register, check device * ready bit
*/
cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
cfi->device_type, NULL);
curd = map_read(map, addr);
case FL_STATUS: for (;;) { if (chip_ready(map, chip, adr, NULL)) break;
if (time_after(jiffies, timeo)) {
printk(KERN_ERR "Waiting for chip to be ready timed out.\n"); return -EIO;
}
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex); /* Someone else might have been playing with it. */ goto retry;
} return 0;
case FL_READY: case FL_CFI_QUERY: case FL_JEDEC_QUERY: return 0;
/* Do not allow suspend iff read/write to EB address */ if ((adr & chip->in_progress_block_mask) ==
chip->in_progress_block_addr) goto sleep;
/* Erase suspend */ /* It's harmless to issue the Erase-Suspend and Erase-Resume
* commands when the erase algorithm isn't in progress. */
map_write(map, CMD(0xB0), chip->in_progress_block_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1; for (;;) { if (chip_ready(map, chip, adr, NULL)) break;
if (time_after(jiffies, timeo)) { /* Should have suspended the erase by now. * Send an Erase-Resume command as either * there was an error (so leave the erase * routine to recover from it) or we trying to
* use the erase-in-progress sector. */
put_chip(map, chip, adr);
printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__); return -EIO;
}
mutex_unlock(&chip->mutex);
cfi_udelay(1);
mutex_lock(&chip->mutex); /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
So we can just loop here. */
}
chip->state = FL_READY; return 0;
case FL_XIP_WHILE_ERASING:
chip->state = chip->oldstate;
chip->oldstate = FL_READY; break;
case FL_READY: case FL_STATUS: break; default:
printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
}
wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP
/* * No interrupt what so ever can be serviced while the flash isn't in array * mode. This is ensured by the xip_disable() and xip_enable() functions * enclosing any code path where the flash is known not to be in array mode. * And within a XIP disabled code path, only functions marked with __xipram * may be called and nothing else (it's a good thing to inspect generated * assembly to make sure inline functions were actually inlined and that gcc * didn't emit calls to its own support functions). Also configuring MTD CFI * support to a single buswidth and a single interleave is also recommended.
*/
/* Put the CPU in a state where the flash may safely leave array mode. */
static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}
/* * When a delay is required for the flash operation to complete, the * xip_udelay() function is polling for both the given timeout and pending * (but still masked) hardware interrupts. Whenever there is an interrupt * pending then the flash erase operation is suspended, array mode restored * and interrupts unmasked. Task scheduling might also happen at that * point. The CPU eventually returns from the interrupt or the call to * schedule() and the suspended flash operation is resumed for the remaining * of the delay period. * * Warning: this function _will_ fool interrupt latency tracing tools.
*/
do {
cpu_relax(); if (xip_irqpending() && extp &&
((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
(cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) { /* * Let's suspend the erase operation when supported. * Note that we currently don't try to suspend * interleaved chips if there is already another * operation suspended (imagine what happens * when one chip was already done with the current * operation while another chip suspended it, then * we resume the whole thing at once). Yes, it * can happen!
*/
map_write(map, CMD(0xb0), adr);
usec -= xip_elapsed_since(start);
suspended = xip_currtime(); do { if (xip_elapsed_since(suspended) > 100000) { /* * The chip doesn't want to suspend * after waiting for 100 msecs. * This is a critical error but there * is not much we can do here.
*/ return;
}
status = map_read(map, adr);
} while (!map_word_andequal(map, status, OK, OK));
/* * We're back. However someone else might have * decided to go write to the chip if we are in * a suspended erase state. If so let's wait * until it's done.
*/
mutex_lock(&chip->mutex); while (chip->state != FL_XIP_WHILE_ERASING) {
DECLARE_WAITQUEUE(wait, current);
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&chip->wq, &wait);
mutex_unlock(&chip->mutex);
schedule();
remove_wait_queue(&chip->wq, &wait);
mutex_lock(&chip->mutex);
} /* Disallow XIP again */
local_irq_disable();
/* Correct Erase Suspend Hangups for M29EW */
cfi_fixup_m29ew_erase_suspend(map, adr); /* Resume the write or erase operation */
map_write(map, cfi->sector_erase_cmd, adr);
chip->state = oldstate;
start = xip_currtime();
} elseif (usec >= 1000000/HZ) { /* * Try to save on CPU power when waiting delay * is at least a system timer tick period. * No need to be extremely accurate here.
*/
xip_cpu_idle();
}
status = map_read(map, adr);
} while (!map_word_andequal(map, status, OK, OK)
&& xip_elapsed_since(start) < usec);
}
/* * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while * the flash is actively programming or erasing since we have to poll for * the operation to complete anyway. We can't do that in a generic way with * a XIP setup so do it before the actual flash operation in this case * and stub it out from INVALIDATE_CACHE_UDELAY.
*/ #define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
/* * Extra notes: * * Activating this XIP support changes the way the code works a bit. For * example the code to suspend the current process when concurrent access * happens is never executed because xip_udelay() will always return with the * same chip state as it was entered with. This is why there is no care for * the presence of add_wait_queue() or schedule() calls from within a couple * xip_disable()'d areas of code, like in do_erase_oneblock for example. * The queueing and scheduling are always happening within xip_udelay(). * * Similarly, get_chip() and put_chip() just happen to always be executed * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state * is in array mode, therefore never executing many cases therein and not * causing any problem with XIP.
*/
staticint do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
size_t len, u_char *buf, size_t grouplen)
{ int ret; while (len) { unsignedlong bus_ofs = adr & ~(map_bankwidth(map)-1); int gap = adr - bus_ofs; int n = min_t(int, len, map_bankwidth(map) - gap);
map_word datum = map_word_ff(map);
if (n != map_bankwidth(map)) { /* partial write of a word, load old contents */
otp_enter(map, chip, bus_ofs, map_bankwidth(map));
datum = map_read(map, bus_ofs);
otp_exit(map, chip, bus_ofs, map_bankwidth(map));
}
datum = map_word_load_partial(map, datum, buf, gap, n);
ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE); if (ret) return ret;
/*
 * Issue a single-word program and poll for completion.
 *
 * Returns 0 on success, -EIO on timeout or when the chip's status
 * register reports an erase/program error.
 *
 * NOTE(review): the visible text of this function was truncated — 'timeo'
 * was read uninitialized and the 'break's had no enclosing loop.  The
 * unlock/program command sequence and polling loop below are restored from
 * the standard AMD one-word program flow; confirm against upstream.
 */
static int __xipram do_write_oneword_once(struct map_info *map,
					  struct flchip *chip,
					  unsigned long adr, map_word datum,
					  int mode, struct cfi_private *cfi)
{
	unsigned long timeo;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec). However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead. Unfortunately, struct flchip does have a field for
	 * maximum timeout, only for typical which can be far too short
	 * depending of the conditions. The ' + 1' is to avoid having a
	 * timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = (HZ / 1000) + 1;
	int ret = 0;

	/* Standard AMD unlock cycles followed by the Program (0xA0) command */
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = mode;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != mode) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		/*
		 * We check "time_after" and "!chip_good" before checking
		 * "chip_good" to avoid the failure due to scheduling.
		 */
		if (time_after(jiffies, timeo) &&
		    !chip_good(map, chip, adr, &datum)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			ret = -EIO;
			break;
		}

		if (chip_good(map, chip, adr, &datum)) {
			if (cfi_check_err_status(map, chip, adr))
				ret = -EIO;
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	return ret;
}
/*
 * Acquire the chip for a one-word write (or OTP write).
 *
 * Returns 0 with chip->mutex held on success; on failure releases the
 * mutex and returns the error from get_chip().
 *
 * The visible text was truncated: the success-path 'return 0;' and the
 * closing brace were missing and are restored here.
 */
static int __xipram do_write_oneword_start(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, int mode)
{
	int ret;

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* OTP writes need the chip switched into its secure-silicon region */
	if (mode == FL_OTP_WRITE)
		otp_enter(map, chip, adr, map_bankwidth(map));

	return 0;
}
/*
 * Write one word, retrying up to MAX_RETRIES times on failure.
 *
 * Skips the write entirely when the target already holds the datum (NOP).
 * On any failure a 0xF0 reset is issued before retrying.
 *
 * The visible text called xip_enable() without ever having disabled XIP or
 * enabled VPP; the cache-invalidate / ENABLE_VPP / xip_disable trio before
 * the write attempt is restored here to keep the pairing balanced.
 */
static int __xipram do_write_oneword_retry(struct map_info *map,
					   struct flchip *chip,
					   unsigned long adr, map_word datum,
					   int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		pr_debug("MTD %s(): NOP\n", __func__);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

 retry:
	ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
	if (ret) {
		/* reset on all failures. */
		map_write(map, CMD(0xF0), chip->start);
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_RETRIES)
			goto retry;
	}
	xip_enable(map, chip, adr);

	return ret;
}
staticint __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsignedlong adr, map_word datum, int mode)
{ int ret;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
chipstart = cfi->chips[chipnum].start;
/* If it's not bus-aligned, do the first byte write */ if (ofs & (map_bankwidth(map)-1)) { unsignedlong bus_ofs = ofs & ~(map_bankwidth(map)-1); int i = ofs - bus_ofs; int n = 0;
map_word tmp_buf;
retry:
mutex_lock(&cfi->chips[chipnum].mutex);
if (cfi->chips[chipnum].state != FL_READY) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&cfi->chips[chipnum].wq, &wait);
/* * Timeout is calculated according to CFI data, if available. * See more comments in cfi_cmdset_0002().
*/
u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
timeo = jiffies + u_write_timeout;
for (;;) { if (chip->state != FL_WRITING) { /* Someone's suspended the write. Sleep */
DECLARE_WAITQUEUE(wait, current);
/* * We check "time_after" and "!chip_good" before checking * "chip_good" to avoid the failure due to scheduling.
*/ if (time_after(jiffies, timeo) &&
!chip_good(map, chip, adr, &datum)) {
pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
__func__, adr);
ret = -EIO; break;
}
if (chip_good(map, chip, adr, &datum)) { if (cfi_check_err_status(map, chip, adr))
ret = -EIO; break;
}
/* Latency issues. Drop the lock, wait a while and retry */
UDELAY(map, chip, adr, 1);
}
return ret;
}
/*
 * Recovery from write-buffer programming failures requires
 * the write-to-buffer-reset sequence.  Since the last part
 * of the sequence also works as a normal reset, we can run
 * the same commands regardless of why we are here.
 * See e.g.
 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
 */
static void __xipram do_write_buffer_reset(struct map_info *map,
					   struct flchip *chip,
					   struct cfi_private *cfi)
{
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);

	/* FIXME - should have reset delay before continuing */
}
/* * FIXME: interleaved mode not tested, and probably not supported!
*/ staticint __xipram do_write_buffer(struct map_info *map, struct flchip *chip, unsignedlong adr, const u_char *buf, int len)
{ struct cfi_private *cfi = map->fldrv_priv; int ret; unsignedlong cmd_adr; int z, words;
map_word datum;
adr += chip->start;
cmd_adr = adr;
mutex_lock(&chip->mutex);
ret = get_chip(map, chip, adr, FL_WRITING); if (ret) {
mutex_unlock(&chip->mutex); return ret;
}
/* Write length of data to come */
words = len / map_bankwidth(map);
map_write(map, CMD(words - 1), cmd_adr); /* Write data */
z = 0; while(z < words * map_bankwidth(map)) {
datum = map_word_load(map, buf);
map_write(map, datum, adr + z);
z += map_bankwidth(map);
buf += map_bankwidth(map);
}
z -= map_bankwidth(map);
adr += z;
/* Write Buffer Program Confirm: GO GO GO */
map_write(map, CMD(0x29), cmd_adr);
chip->state = FL_WRITING;
staticint cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
size_t *retlen, const u_char *buf)
{ struct map_info *map = mtd->priv; struct cfi_private *cfi = map->fldrv_priv; int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize; int ret; int chipnum; unsignedlong ofs;
chipnum = to >> cfi->chipshift;
ofs = to - (chipnum << cfi->chipshift);
/* If it's not bus-aligned, do the first word write */ if (ofs & (map_bankwidth(map)-1)) {
size_t local_len = (-ofs)&(map_bankwidth(map)-1); if (local_len > len)
local_len = len;
ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
local_len, retlen, buf); if (ret) return ret;
ofs += local_len;
buf += local_len;
len -= local_len;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0; if (chipnum == cfi->numchips) return 0;
}
}
/* Write buffer is worth it only if more than one word to write... */ while (len >= map_bankwidth(map) * 2) { /* We must not cross write block boundaries */ int size = wbufsize - (ofs & (wbufsize-1));
if (size > len)
size = len; if (size % map_bankwidth(map))
size -= size % map_bankwidth(map);
ret = do_write_buffer(map, &cfi->chips[chipnum],
ofs, buf, size); if (ret) return ret;
if (ofs >> cfi->chipshift) {
chipnum ++;
ofs = 0; if (chipnum == cfi->numchips) return 0;
}
}
if (len) {
size_t retlen_dregs = 0;
ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
len, &retlen_dregs, buf);
*retlen += retlen_dregs; return ret;
}
return 0;
} #endif/* !FORCE_WORD_WRITE */
/* * Wait for the flash chip to become ready to write data * * This is only called during the panic_write() path. When panic_write() * is called, the kernel is in the process of a panic, and will soon be * dead. Therefore we don't take any locks, and attempt to get access * to the chip as soon as possible.
*/ staticint cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip, unsignedlong adr)
{ struct cfi_private *cfi = map->fldrv_priv; int retries = 10; int i;
/* * If the driver thinks the chip is idle, and no toggle bits * are changing, then the chip is actually idle for sure.
*/ if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL)) return 0;
/* * Try several times to reset the chip and then wait for it * to become idle. The upper limit of a few milliseconds of * delay isn't a big problem: the kernel is dying anyway. It * is more important to save the messages.
*/ while (retries > 0) { constunsignedlong timeo = (HZ / 1000) + 1;
/* send the reset command */
map_write(map, CMD(0xF0), chip->start);
/* wait for the chip to become ready */ for (i = 0; i < jiffies_to_usecs(timeo); i++) { if (chip_ready(map, chip, adr, NULL)) return 0;
udelay(1);
}
retries--;
}
/* the chip never became ready */ return -EBUSY;
}
/* * Write out one word of data to a single flash chip during a kernel panic * * This is only called during the panic_write() path. When panic_write() * is called, the kernel is in the process of a panic, and will soon be * dead. Therefore we don't take any locks, and attempt to get access * to the chip as soon as possible. * * The implementation of this routine is intentionally similar to * do_write_oneword(), in order to ease code maintenance.
*/ staticint do_panic_write_oneword(struct map_info *map, struct flchip *chip, unsignedlong adr, map_word datum)
{ constunsignedlong uWriteTimeout = (HZ / 1000) + 1; struct cfi_private *cfi = map->fldrv_priv; int retry_cnt = 0;
map_word oldd; int ret; int i;
adr += chip->start;
ret = cfi_amdstd_panic_wait(map, chip, adr); if (ret) return ret;
/* * Check for a NOP for the case when the datum to write is already * present - it saves time and works around buggy chips that corrupt * data at other locations when 0xff is written to a location that * already contains 0xff.
*/
oldd = map_read(map, adr); if (map_word_equal(map, oldd, datum)) {
pr_debug("MTD %s(): NOP\n", __func__); goto op_done;
}
/*
 * NOTE(review): the following text is a German website disclaimer that was
 * accidentally appended to this source file; as bare prose it is not valid C.
 * Preserved here (translated) as a comment pending removal:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */