/*
 * Cache stuff...
 *
 * Since typical flash erasable sectors are much larger than what Linux's
 * buffer cache can handle, we must implement read-modify-write on flash
 * sectors for each block write request. To avoid over-erasing flash sectors
 * and to speed things up, we locally cache a whole flash sector while it is
 * being written to until a different sector is required.
 */
pr_debug("mtdblock: writing cached data for \"%s\" " "at 0x%lx, size 0x%x\n", mtd->name,
mtdblk->cache_offset, mtdblk->cache_size);
ret = erase_write (mtd, mtdblk->cache_offset,
mtdblk->cache_size, mtdblk->cache_data);
/* * Here we could arguably set the cache state to STATE_CLEAN. * However this could lead to inconsistency since we will not * be notified if this content is altered on the flash by other * means. Let's declare it empty and leave buffering tasks to * the buffer cache instead. * * If this cache_offset points to a bad block, data cannot be * written to the device. Clear cache_state to avoid writing to * bad blocks repeatedly.
*/ if (ret == 0 || ret == -EIO)
mtdblk->cache_state = STATE_EMPTY; return ret;
}
if (size == sect_size) { /* * We are covering a whole sector. Thus there is no * need to bother with the cache while it may still be * useful for other partial writes.
*/
ret = erase_write (mtd, pos, size, buf); if (ret) return ret;
} else { /* Partial sector: need to use the cache */
if (mtdblk->cache_state == STATE_DIRTY &&
mtdblk->cache_offset != sect_start) {
ret = write_cached_data(mtdblk); if (ret) return ret;
}
if (mtdblk->cache_state == STATE_EMPTY ||
mtdblk->cache_offset != sect_start) { /* fill the cache with the current sector */
mtdblk->cache_state = STATE_EMPTY;
ret = mtd_read(mtd, sect_start, sect_size,
&retlen, mtdblk->cache_data); if (ret && !mtd_is_bitflip(ret)) return ret; if (retlen != sect_size) return -EIO;
/* * Check if the requested data is already cached * Read the requested amount of data from our internal cache if it * contains what we want, otherwise we read the data directly * from flash.
*/ if (mtdblk->cache_state != STATE_EMPTY &&
mtdblk->cache_offset == sect_start) {
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
ret = mtd_read(mtd, pos, size, &retlen, buf); if (ret && !mtd_is_bitflip(ret)) return ret; if (retlen != size) return -EIO;
}
staticint mtdblock_writesect(struct mtd_blktrans_dev *dev, unsignedlong block, char *buf)
{ struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) {
mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize); if (!mtdblk->cache_data) return -EINTR; /* -EINTR is not really correct, but it is the best match * documented in man 2 write for all cases. We could also * return -EAGAIN sometimes, but why bother?
*/
} return do_cached_write(mtdblk, block<<9, 512, buf);
}
if (mtd_type_is_nand(mbd->mtd))
pr_warn_ratelimited("%s: MTD device '%s' is NAND, please consider using UBI block devices instead.\n",
mbd->tr->name, mbd->mtd->name);
/* OK, it's not open. Create cache info for it */
mtdblk->count = 1;
mutex_init(&mtdblk->cache_mutex);
mtdblk->cache_state = STATE_EMPTY; if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) {
mtdblk->cache_size = mbd->mtd->erasesize;
mtdblk->cache_data = NULL;
}
if (!--mtdblk->count) { /* * It was the last usage. Free the cache, but only sync if * opened for writing.
*/ if (mbd->writable)
mtd_sync(mbd->mtd);
vfree(mtdblk->cache_data);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.