/*
 * Read LBA associated with block.
 * returns -1, if block is erased
 * returns -2 if error happens
 * otherwise returns the decoded logical block address
 */
static int sm_read_lba(struct sm_oob *oob)
{
	static const uint32_t erased_pattern[4] = {
		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };

	uint16_t lba_test;
	int lba;

	/* First test for erased block: a fully erased oob is all 0xFF */
	if (!memcmp(oob, erased_pattern, SM_OOB_SIZE))
		return -1;

	/* Now check if both copies of the LBA differ too much.
	 * A single-bit difference (power of two XOR) is tolerated, since
	 * the LBA field is ECC-protected per copy. */
	lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t *)oob->lba_copy2;
	if (lba_test && !is_power_of_2(lba_test))
		return -2;

	/* And read it */
	lba = sm_get_lba(oob->lba_copy1);

	/* NOTE(review): the original tail of this function was lost when the
	 * file was mangled (the text fell through into sm_read_sector()).
	 * Returning the decoded LBA restores a valid return path — confirm
	 * against upstream drivers/mtd/sm_ftl.c. */
	return lba;
}
/*
 * NOTE(review): the lines below are the *interior* of sm_read_sector() — the
 * function header, the local declarations (mtd, ops, ret, try) and the
 * mtd_oob_ops setup were lost when this file was mangled. Reconstruct from
 * upstream drivers/mtd/sm_ftl.c; do not trust this fragment as-is.
 * What survives is the read-retry loop: on failure it re-reads up to 3 times,
 * rechecking media health between attempts.
 */
again: if (try++) { /* Avoid infinite recursion on CIS reads, sm_recheck_media * won't help anyway
 */ if (zone == 0 && block == ftl->cis_block && boffset ==
ftl->cis_boffset) return ret;
/* Test if media is stable */ if (try == 3 || sm_recheck_media(ftl)) return ret;
}
/* Unfortunately, oob read will _always_ succeed, * despite card removal.....
 */
ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Test for unknown errors */ if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
dbg("read of block %d at zone %d, failed due to error (%d)",
block, zone, ret); goto again;
}
/* Do a basic test on the oob, to guard against returned garbage */ if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved)) goto again;
/* This should never happen, unless there is a bug in the mtd driver */
WARN_ON(ops.oobretlen != SM_OOB_SIZE);
WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
/* oob-only read was requested — done */
if (!buffer) return 0;
/* Test if sector marked as bad */ if (!sm_sector_valid(oob)) {
dbg("read of block %d at zone %d, failed because it is marked" " as bad" , block, zone); goto again;
}
/* Test ECC*/ if (mtd_is_eccerr(ret) ||
(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {
dbg("read of block %d at zone %d, failed due to ECC error",
block, zone); goto again;
}
return 0;
}
/*
 * NOTE(review): sm_write_sector() below is truncated. Missing vs upstream:
 * the 'if (ftl->unstable) return -EIO;' guard and the mtd_oob_ops field
 * initialization (mode, len, ooblen, ooboffs, datbuf, oobbuf) that must
 * precede mtd_write_oob(), plus the closing 'return 0;' and brace. As
 * written, 'ops' stays zeroed so the write would transfer nothing.
 * Restore from upstream drivers/mtd/sm_ftl.c before use.
 */
/* Writes a sector to media */ staticint sm_write_sector(struct sm_ftl *ftl, int zone, int block, int boffset,
uint8_t *buffer, struct sm_oob *oob)
{ struct mtd_oob_ops ops = { }; struct mtd_info *mtd = ftl->trans->mtd; int ret;
BUG_ON(ftl->readonly);
/* The CIS (and block 0) must never be overwritten */
if (zone == 0 && (block == ftl->cis_block || block == 0)) {
dbg("attempted to write the CIS!"); return -EIO;
}
ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Now we assume that hardware will catch write bitflip errors */
if (ret) {
dbg("write to block %d at zone %d, failed with error %d",
block, zone, ret);
sm_recheck_media(ftl); return ret;
}
/* This should never happen, unless there is a bug in the driver */
WARN_ON(ops.oobretlen != SM_OOB_SIZE);
WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);
/*
 * NOTE(review): sm_write_block() below is truncated — only the prologue and
 * the erase-on-restart path survived; the per-sector write loop and the
 * function tail are missing, and 'unsignedlong' is a mangled 'unsigned long'.
 * Restore from upstream drivers/mtd/sm_ftl.c.
 */
/* Write a block using data and lba, and invalid sector bitmap */ staticint sm_write_block(struct sm_ftl *ftl, uint8_t *buf, int zone, int block, int lba, unsignedlong invalid_bitmap)
{ bool sm_order = IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC); struct sm_oob oob; int boffset; int retry = 0;
/* Initialize the oob with requested values */
memset(&oob, 0xFF, SM_OOB_SIZE);
sm_write_lba(&oob, lba);
restart: if (ftl->unstable) return -EIO;
/* If write fails. try to erase the block */ /* This is safe, because we never write in blocks * that contain valuable data. * This is intended to repair block that are marked * as erased, but that isn't fully erased
 */
if (sm_erase_block(ftl, zone, block, 0)) return -EIO;
/* Mark whole block at offset 'offs' as bad. */ staticvoid sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block)
{ struct sm_oob oob; int boffset;
sm_printk("marking block %d of zone %d as bad", block, zone);
/* We aren't checking the return value, because we don't care */ /* This also fails on fake xD cards, but I guess these won't expose * any bad blocks till fail completely
*/ for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE)
sm_write_sector(ftl, zone, block, boffset, NULL, &oob);
}
/*
 * NOTE(review): sm_erase_block() below is cut off right after its local
 * declarations — the erase_info setup, the mtd_erase() call, the free-block
 * fifo update and the mark-bad error path are all missing. Restore from
 * upstream drivers/mtd/sm_ftl.c.
 */
/* * Erase a block within a zone * If erase succeeds, it updates free block fifo, otherwise marks block as bad
 */ staticint sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block, int put_free)
{ struct ftl_zone *zone = &ftl->zones[zone_num]; struct mtd_info *mtd = ftl->trans->mtd; struct erase_info erase;
/*
 * Thoroughly test that block is valid.
 * Returns 0 if the block is valid, 1 if it was sliced and has been erased,
 * -2 on read error, -EIO if it looks corrupt (3+ distinct LBAs).
 */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	/* lbas[0] = -3 is a sentinel no real LBA can match, so the first
	 * sector's LBA always lands in lbas[1] */
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;

	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	 * accepted */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		/* Record each newly-seen LBA value */
		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	/* NOTE(review): 'return 0' and the closing brace were lost in the
	 * file corruption and are restored here — confirm against upstream. */
	return 0;
}
/* First bytes of a valid CIS; compared below against the data read from the
 * CIS sector. (NOTE(review): 'staticconst'/'staticint' are mangled
 * 'static const'/'static int' — fix before building.) */
staticconst uint8_t cis_signature[] = {
0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
}; /* Find out media parameters. * This ideally has to be based on nand id, but for now device size is enough
 */ staticint sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{ int i; int size_in_megs = mtd->size / (1024 * 1024);
ftl->readonly = mtd->type == MTD_ROM;
/* Manual settings for very old devices */
ftl->zone_count = 1;
ftl->smallpagenand = 0;
/* Minimum xD size is 16MiB. Also, all xD cards have standard zone * sizes. SmartMedia cards exist up to 128 MiB and have same layout
 */ if (size_in_megs >= 16) {
ftl->zone_count = size_in_megs / 16;
ftl->zone_size = 1024;
ftl->max_lba = 1000;
ftl->block_size = 32 * SM_SECTOR_SIZE;
}
/* Test for proper write,erase and oob sizes */ if (mtd->erasesize > ftl->block_size) return -ENODEV;
if (mtd->writesize > SM_SECTOR_SIZE) return -ENODEV;
if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE) return -ENODEV;
if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE) return -ENODEV;
/* We use OOB */ if (!mtd_has_oob(mtd)) return -ENODEV;
/* Find geometry information */ for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) { if (chs_table[i].size == size_in_megs) {
ftl->cylinders = chs_table[i].cyl;
ftl->heads = chs_table[i].head;
ftl->sectors = chs_table[i].sec; return 0;
}
}
/* NOTE(review): from here to the closing brace the file splices in the body
 * of sm_read_cis() — reading and signature-checking the CIS sector is not
 * part of sm_get_media_info(). Upstream sm_get_media_info() instead fills
 * default CHS geometry at this point and returns 0. Restore both functions
 * from upstream drivers/mtd/sm_ftl.c. */
if (sm_read_sector(ftl,
0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob)) return -EIO;
if (!sm_sector_valid(&oob) || !sm_block_valid(&oob)) return -EIO;
if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset,
cis_signature, sizeof(cis_signature))) { return 0;
}
return -EIO;
}
/*
 * NOTE(review): sm_find_cis() below is garbled. Missing vs upstream: the
 * sector-validity check inside the second loop, the assignments of
 * ftl->cis_block / ftl->cis_boffset, and the first two sm_read_cis() probe
 * attempts (old-style spare-area layouts). Also the trailing 'return -EIO'
 * ended up inside the for loop and the braces are unbalanced. Restore from
 * upstream drivers/mtd/sm_ftl.c before building.
 */
/* Scan the media for the CIS */ staticint sm_find_cis(struct sm_ftl *ftl)
{ struct sm_oob oob; int block, boffset; int block_found = 0; int cis_found = 0;
/* Search for first valid block */ for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {
if (sm_read_sector(ftl, 0, block, 0, NULL, &oob)) continue;
if (!sm_block_valid(&oob)) continue;
block_found = 1; break;
}
if (!block_found) return -EIO;
/* Search for first valid sector in this block */ for (boffset = 0 ; boffset < ftl->block_size;
boffset += SM_SECTOR_SIZE) {
if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob)) continue;
if (!cis_found) {
ftl->cis_page_offset = SM_SMALL_PAGE;
cis_found = !sm_read_cis(ftl);
}
if (cis_found) {
dbg("CIS block found at offset %x",
block * ftl->block_size +
boffset + ftl->cis_page_offset); return 0;
} return -EIO;
}
/* Basic test to determine if underlying mtd device is functional */
static int sm_recheck_media(struct sm_ftl *ftl)
{
	/* If the CIS can no longer be read back, the card was removed or is
	 * failing; latch the 'unstable' flag so no further writes happen. */
	if (sm_read_cis(ftl)) {
		if (!ftl->unstable) {
			sm_printk("media unstable, not allowing writes");
			ftl->unstable = 1;
		}
		return -EIO;
	}
	return 0;
}
/* Initialize a FTL zone: scan its blocks, build the LBA->physical table and
 * the free-block FIFO. Returns 0 on success, -ENOMEM/-EIO on failure. */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	/* Allocate memory for the FTL table (2 bytes per LBA entry).
	 * NOTE(review): this allocation was missing from the corrupted
	 * source — without it the NULL check below always fails. Restored
	 * per upstream drivers/mtd/sm_ftl.c; confirm. */
	zone->lba_to_phys_table = kmalloc_array(ftl->max_lba, 2, GFP_KERNEL);
	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);

	/* Allocate memory for free sectors FIFO */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob)) {
			kfifo_free(&zone->free_sectors);
			kfree(zone->lba_to_phys_table);
			return -EIO;
		}

		/* Test to see if block is erased. It is enough to test
		 * first sector, because erase happens in one shot */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				 (unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
		 * very low probability of failure here */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> ", block);
			continue;
		}

		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		 * lets leave that to recovery application */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}

		/* If there is no collision,
		 * just put the sector in the FTL table */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
				   zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
		 * they hold different versions of same data. It not
		 * known which is more recent, thus just erase one of them */
		sm_printk("both blocks are valid, erasing the later");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
	 * work, but it can still can be (partially) read */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to (simple wear leveling) */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	/* Rotate the FIFO by 'i' entries */
	while (i--) {
		len = kfifo_out(&zone->free_sectors,
				(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors,
			 (const unsigned char *)&block, 2);
	}
	return 0;
}
/* Get and automatically initialize an FTL mapping for one zone */ staticstruct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num)
{ struct ftl_zone *zone; int error;
BUG_ON(zone_num >= ftl->zone_count);
zone = &ftl->zones[zone_num];
if (!zone->initialized) {
error = sm_init_zone(ftl, zone_num);
if (error) return ERR_PTR(error);
} return zone;
}
/* Put sector in one block cache */
static void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset)
{
	memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE);
	/* Mark this sector of the cached block as valid */
	clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap);
	/* Cache now holds data that has not been flushed to the media */
	ftl->cache_clean = 0;
}
/* Read a sector from the cache */ staticint sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset)
{ if (test_bit(boffset / SM_SECTOR_SIZE,
&ftl->cache_data_invalid_bitmap)) return -1;
/* NOTE(review): sm_cache_get() should simply memcpy the cached sector into
 * 'buffer' and return 0 here. Instead the file splices in the tail of
 * sm_cache_flush() (free-sector allocation, sm_write_block call, FTL-table
 * update, old-block erase) — 'zone', 'write_sector', 'zone_num', 'block_num'
 * and the 'restart' label are all locals of that other function. Restore
 * both functions from upstream drivers/mtd/sm_ftl.c. */
/* If there are no spare blocks, */ /* we could still continue by erasing/writing the current block, * but for such worn out media it doesn't worth the trouble, * and the dangers
 */ if (kfifo_out(&zone->free_sectors,
(unsignedchar *)&write_sector, 2) != 2) {
dbg("no free sectors for write!"); return -EIO;
}
if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
ftl->cache_block, ftl->cache_data_invalid_bitmap)) goto restart;
/* Update the FTL table */
zone->lba_to_phys_table[ftl->cache_block] = write_sector;
/* Write successful, so erase and free the old block */ if (block_num > 0)
sm_erase_block(ftl, zone_num, block_num, 1);
sm_cache_init(ftl); return 0;
}
/* flush timer, runs a second after last write */
static void sm_cache_flush_timer(struct timer_list *t)
{
	struct sm_ftl *ftl = timer_container_of(ftl, t, timer);

	/* Defer the actual flush to process context via the workqueue */
	queue_work(cache_flush_workqueue, &ftl->flush_work);
}
/* NOTE(review): orphaned fragment — these lines are the middle of the FTL
 * read path (upstream sm_read()): zone lookup, one-block-cache probe, and
 * LBA-to-physical translation. The enclosing function header, the 'unlock'
 * label and the tail were lost. Restore from upstream. */
zone = sm_get_zone(ftl, zone_num); if (IS_ERR(zone)) {
error = PTR_ERR(zone); goto unlock;
}
/* Have to look at cache first */ if (ftl->cache_zone == zone_num && ftl->cache_block == block) {
in_cache = 1; if (!sm_cache_get(ftl, buf, boffset)) goto unlock;
}
/* Translate the block and return if doesn't exist in the table */
block = zone->lba_to_phys_table[block];
/* NOTE(review): orphaned fragment — this is the allocation/probe sequence
 * from the middle of the mtd attach routine (upstream sm_add_mtd()): media
 * probe, CIS buffer, zone array, cache and blktrans device allocation. Its
 * function header and the error2..error5 cleanup labels are missing.
 * Restore from upstream. */
/* Read media information */ if (sm_get_media_info(ftl, mtd)) {
dbg("found unsupported mtd device, aborting"); goto error2;
}
/* Allocate temporary CIS buffer for read retry support */
ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); if (!ftl->cis_buffer) goto error2;
/* Allocate zone array, it will be initialized on demand */
ftl->zones = kcalloc(ftl->zone_count, sizeof(struct ftl_zone),
GFP_KERNEL); if (!ftl->zones) goto error3;
/* Allocate the cache*/
ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL);
if (!ftl->cache_data) goto error4;
sm_cache_init(ftl);
/* Allocate upper layer structure and initialize it */
trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL); if (!trans) goto error5;
[Extraneous text — German website-disclaimer residue from the extraction, not part of the source; translated for the record:]
The information on this web page has been carefully compiled to the best of our knowledge. However, no guarantee is given as to the completeness, correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.