/**
 * init_seen - allocate a bitmap used for fastmap self-check debugging.
 * @ubi: UBI device description object
 *
 * Returns NULL when the fastmap self-check is disabled, a zeroed bitmap
 * of @ubi->peb_count bits on success, or ERR_PTR(-ENOMEM) on allocation
 * failure. Callers must therefore distinguish NULL from IS_ERR().
 */
static inline unsigned long *init_seen(struct ubi_device *ubi)
{
	unsigned long *ret;

	if (!ubi_dbg_chk_fastmap(ubi))
		return NULL;

	ret = bitmap_zalloc(ubi->peb_count, GFP_NOFS);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	return ret;
}
/**
 * free_seen - free the seen logic bitmap.
 * @seen: bitmap of @ubi->peb_count bits, as returned by init_seen()
 *
 * bitmap_free() accepts NULL, so this is safe when the self-check is
 * disabled and init_seen() returned NULL.
 */
static inline void free_seen(unsigned long *seen)
{
	bitmap_free(seen);
}
/**
 * set_seen - mark a PEB as seen.
 * @ubi: UBI device description object
 * @pnum: the PEB to be marked as seen
 * @seen: bitmap of @ubi->peb_count bits
 *
 * No-op when the fastmap self-check is disabled or no bitmap was
 * allocated.
 */
static inline void set_seen(struct ubi_device *ubi, int pnum, unsigned long *seen)
{
	if (!ubi_dbg_chk_fastmap(ubi) || !seen)
		return;

	set_bit(pnum, seen);
}
/** * self_check_seen - check whether all PEB have been seen by fastmap. * @ubi: UBI device description object * @seen: integer array of @ubi->peb_count size
*/ staticint self_check_seen(struct ubi_device *ubi, unsignedlong *seen)
{ int pnum, ret = 0;
if (!ubi_dbg_chk_fastmap(ubi) || !seen) return 0;
for (pnum = 0; pnum < ubi->peb_count; pnum++) { if (!test_bit(pnum, seen) && ubi->lookuptbl[pnum]) {
ubi_err(ubi, "self-check failed for PEB %d, fastmap didn't see it", pnum);
ret = -EINVAL;
}
}
return ret;
}
/**
 * ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
 * @ubi: UBI device description object
 */
/*
 * NOTE(review): the body of this function is truncated by extraction below
 * this point; code lines are left untouched. Restore from the canonical
 * source before building.
 */
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
size_t size;
/** * new_fm_vbuf() - allocate a new volume header for fastmap usage. * @ubi: UBI device description object * @vol_id: the VID of the new header * * Returns a new struct ubi_vid_hdr on success. * NULL indicates out of memory.
*/ staticstruct ubi_vid_io_buf *new_fm_vbuf(struct ubi_device *ubi, int vol_id)
{ struct ubi_vid_io_buf *new; struct ubi_vid_hdr *vh;
new = ubi_alloc_vid_buf(ubi, GFP_NOFS); if (!new) goto out;
/* UBI implementations without fastmap support have to delete the * fastmap.
*/
vh->compat = UBI_COMPAT_DELETE;
out: returnnew;
}
/**
 * add_aeb - create and add an attach erase block to a given list.
 * @ai: UBI attach info object
 * @list: the target list
 * @pnum: PEB number of the new attach erase block
 * @ec: erase counter of the new LEB
 * @scrub: scrub this PEB after attaching
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
/* NOTE(review): body truncated by extraction; code left untouched. */
staticint add_aeb(struct ubi_attach_info *ai, struct list_head *list, int pnum, int ec, int scrub)
{ struct ubi_ainf_peb *aeb;
aeb = ubi_alloc_aeb(ai, pnum, ec); if (!aeb) return -ENOMEM;
/**
 * add_vol - create and add a new volume to ubi_attach_info.
 * @ai: ubi_attach_info object
 * @vol_id: VID of the new volume
 * @used_ebs: number of used EBS
 * @data_pad: data padding value of the new volume
 * @vol_type: volume type
 * @last_eb_bytes: number of bytes in the last LEB
 *
 * Returns the new struct ubi_ainf_volume on success.
 * NULL indicates an error.
 */
/* NOTE(review): body truncated by extraction; code left untouched. */
staticstruct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id, int used_ebs, int data_pad, u8 vol_type, int last_eb_bytes)
{ struct ubi_ainf_volume *av;
av = ubi_add_av(ai, vol_id); if (IS_ERR(av)) return av;
/**
 * update_vol - inserts or updates a LEB which was found a pool.
 * @ubi: the UBI device object
 * @ai: attach info object
 * @av: the volume this LEB belongs to
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
/*
 * NOTE(review): this body is garbled by extraction — the rb-tree walk
 * header (loop over *p) and the tail of the function are missing. Code
 * lines left untouched.
 */
staticint update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai, struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh, struct ubi_ainf_peb *new_aeb)
{ struct rb_node **p = &av->root.rb_node, *parent = NULL; struct ubi_ainf_peb *aeb, *victim; int cmp_res;
if (be32_to_cpu(new_vh->lnum) != aeb->lnum) { if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
p = &(*p)->rb_left; else
p = &(*p)->rb_right;
continue;
}
/* This case can happen if the fastmap gets written
 * because of a volume change (creation, deletion, ..).
 * Then a PEB can be within the persistent EBA and the pool.
 */
if (aeb->pnum == new_aeb->pnum) {
ubi_assert(aeb->lnum == new_aeb->lnum);
ubi_free_aeb(ai, new_aeb);
/**
 * process_pool_aeb - we found a non-empty PEB in a pool.
 * @ubi: UBI device object
 * @ai: attach info object
 * @new_vh: the volume header derived from new_aeb
 * @new_aeb: the AEB to be examined
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
/* NOTE(review): tail of this function truncated by extraction; code left untouched. */
staticint process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai, struct ubi_vid_hdr *new_vh, struct ubi_ainf_peb *new_aeb)
{ int vol_id = be32_to_cpu(new_vh->vol_id); struct ubi_ainf_volume *av;
/* Find the volume this SEB belongs to */
av = ubi_find_av(ai, vol_id); if (!av) {
ubi_err(ubi, "orphaned volume in fastmap pool!");
ubi_free_aeb(ai, new_aeb); return UBI_BAD_FASTMAP;
}
/**
 * unmap_peb - unmap a PEB.
 * If fastmap detects a free PEB in the pool it has to check whether
 * this PEB has been unmapped after writing the fastmap.
 *
 * @ai: UBI attach info object
 * @pnum: The PEB to be unmapped
 */
/* NOTE(review): body truncated by extraction; code left untouched. */
staticvoid unmap_peb(struct ubi_attach_info *ai, int pnum)
{ struct ubi_ainf_volume *av; struct rb_node *node, *node2; struct ubi_ainf_peb *aeb;
/**
 * scan_pool - scans a pool for changed (no longer empty PEBs).
 * @ubi: UBI device object
 * @ai: attach info object
 * @pebs: an array of all PEB numbers in the to be scanned pool
 * @pool_size: size of the pool (number of entries in @pebs)
 * @max_sqnum: pointer to the maximal sequence number
 * @free: list of PEBs which are most likely free (and go into @ai->free)
 *
 * Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
 * < 0 indicates an internal error.
 */
/*
 * NOTE(review): extraction gaps in this body (e.g. the vid-buffer
 * allocation between the kzalloc and the loop, and the loop/function
 * tail are missing). Code lines left untouched.
 */
staticint scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
__be32 *pebs, int pool_size, unsignedlonglong *max_sqnum, struct list_head *free)
{ struct ubi_vid_io_buf *vb; struct ubi_vid_hdr *vh; struct ubi_ec_hdr *ech; struct ubi_ainf_peb *new_aeb; int i, pnum, err, ret = 0;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) return -ENOMEM;
/*
 * Now scan all PEBs in the pool to find changes which have been made
 * after the creation of the fastmap
 */
for (i = 0; i < pool_size; i++) { int scrub = 0; int image_seq;
pnum = be32_to_cpu(pebs[i]);
if (ubi_io_is_bad(ubi, pnum)) {
ubi_err(ubi, "bad PEB in fastmap pool!");
ret = UBI_BAD_FASTMAP; goto out;
}
/**
 * count_fastmap_pebs - Counts the PEBs found by fastmap.
 * @ai: The UBI attach info object
 */
/* NOTE(review): body truncated by extraction; code left untouched. */
staticint count_fastmap_pebs(struct ubi_attach_info *ai)
{ struct ubi_ainf_peb *aeb; struct ubi_ainf_volume *av; struct rb_node *rb1, *rb2; int n = 0;
/*
 * NOTE(review): the lines below are the interior of what appears to be
 * ubi_attach_fastmap() — the fastmap parser referenced at the call site
 * further down — but its definition line and several interior chunks
 * (e.g. the per-volume EBA parsing between the volhdr bounds check and
 * the final WARN_ON) are missing from this extraction. Code lines left
 * untouched.
 */
if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
ubi_err(ubi, "bad pool size: %i", pool_size); goto fail_bad;
}
if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
ubi_err(ubi, "bad WL pool size: %i", wl_pool_size); goto fail_bad;
}
if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_pool_size < 0) {
ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size); goto fail_bad;
}
if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_wl_pool_size < 0) {
ubi_err(ubi, "bad maximal WL pool size: %i",
fm->max_wl_pool_size); goto fail_bad;
}
/* read EC values from free list */
for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad;
ret = add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 0); if (ret) goto fail;
}
/* read EC values from used list */
for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad;
ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 0); if (ret) goto fail;
}
/* read EC values from scrub list */
for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad;
ret = add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 1); if (ret) goto fail;
}
/* read EC values from erase list */
for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec); if (fm_pos >= fm_size) goto fail_bad;
ret = add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 1); if (ret) goto fail;
}
/* Iterate over all volumes and read their EBA table */
for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmvhdr); if (fm_pos >= fm_size) goto fail_bad;
/* NOTE(review): extraction gap — volume header validation/EBA parsing missing here. */
/*
 * If fastmap is leaking PEBs (must not happen), raise a
 * fat warning and fall back to scanning mode.
 * We do this here because in ubi_wl_init() it's too late
 * and we cannot fall back to scanning.
 */
if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
ai->bad_peb_count - fm->used_blocks)) goto fail_bad;
/**
 * find_fm_anchor - find the most recent Fastmap superblock (anchor)
 * @ai: UBI attach info to be filled
 */
/* NOTE(review): body truncated by extraction; code left untouched. */
staticint find_fm_anchor(struct ubi_attach_info *ai)
{ int ret = -1; struct ubi_ainf_peb *aeb; unsignedlonglong max_sqnum = 0;
/**
 * ubi_scan_fastmap - scan the fastmap.
 * @ubi: UBI device object
 * @ai: UBI attach info to be filled
 * @scan_ai: UBI attach info from the first 64 PEBs,
 *           used to find the most recent Fastmap data structure
 *
 * Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
 * UBI_BAD_FASTMAP if one was found but is not usable.
 * < 0 indicates an internal error.
 */
/*
 * NOTE(review): this body has extraction gaps (e.g. after the clone_aeb
 * loop the superblock read and fm allocation are missing, and the error
 * unwinding between the wl-entry loop and the out: label is cut). Code
 * lines left untouched.
 */
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai, struct ubi_attach_info *scan_ai)
{ struct ubi_fm_sb *fmsb, *fmsb2; struct ubi_vid_io_buf *vb; struct ubi_vid_hdr *vh; struct ubi_ec_hdr *ech; struct ubi_fastmap_layout *fm; struct ubi_ainf_peb *aeb; int i, used_blocks, pnum, fm_anchor, ret = 0;
size_t fm_size;
__be32 crc, tmp_crc; unsignedlonglong sqnum = 0;
fm_anchor = find_fm_anchor(scan_ai); if (fm_anchor < 0) return UBI_NO_FASTMAP;
/* Copy all (possible) fastmap blocks into our new attach structure. */
list_for_each_entry(aeb, &scan_ai->fastmap, u.list) { struct ubi_ainf_peb *new;
new = clone_aeb(ai, aeb); if (!new) return -ENOMEM;
/* NOTE(review): extraction gap — loop tail and fastmap superblock read missing here. */
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); if (!ech) {
ret = -ENOMEM; goto free_fm_sb;
}
vb = ubi_alloc_vid_buf(ubi, GFP_KERNEL); if (!vb) {
ret = -ENOMEM; goto free_hdr;
}
vh = ubi_get_vid_hdr(vb);
for (i = 0; i < used_blocks; i++) { int image_seq;
pnum = be32_to_cpu(fmsb->block_loc[i]);
if (ubi_io_is_bad(ubi, pnum)) {
ret = UBI_BAD_FASTMAP; goto free_hdr;
}
if (i == 0 && pnum != fm_anchor) {
ubi_err(ubi, "Fastmap anchor PEB mismatch: PEB: %i vs. %i",
pnum, fm_anchor);
ret = UBI_BAD_FASTMAP; goto free_hdr;
}
ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
i, pnum); if (ret > 0)
ret = UBI_BAD_FASTMAP; goto free_hdr;
} elseif (ret == UBI_IO_BITFLIPS)
fm->to_be_tortured[i] = 1;
image_seq = be32_to_cpu(ech->image_seq); if (!ubi->image_seq)
ubi->image_seq = image_seq;
/*
 * Older UBI implementations have image_seq set to zero, so
 * we shouldn't fail if image_seq == 0.
 */
if (image_seq && (image_seq != ubi->image_seq)) {
ubi_err(ubi, "wrong image seq:%d instead of %d",
be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP; goto free_hdr;
}
ret = ubi_io_read_vid_hdr(ubi, pnum, vb, 0); if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
i, pnum); goto free_hdr;
}
if (i == 0) { if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_SB_VOLUME_ID);
ret = UBI_BAD_FASTMAP; goto free_hdr;
}
} else { if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_DATA_VOLUME_ID);
ret = UBI_BAD_FASTMAP; goto free_hdr;
}
}
if (sqnum < be64_to_cpu(vh->sqnum))
sqnum = be64_to_cpu(vh->sqnum);
ret = ubi_io_read_data(ubi, ubi->fm_buf + (ubi->leb_size * i),
pnum, 0, ubi->leb_size); if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, " "err: %i)", i, pnum, ret); goto free_hdr;
}
}
kfree(fmsb);
fmsb = NULL;
fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
tmp_crc = be32_to_cpu(fmsb2->data_crc);
fmsb2->data_crc = 0;
crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size); if (crc != tmp_crc) {
ubi_err(ubi, "fastmap data CRC is invalid");
ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
tmp_crc, crc);
ret = UBI_BAD_FASTMAP; goto free_hdr;
}
fmsb2->sqnum = sqnum;
fm->used_blocks = used_blocks;
ret = ubi_attach_fastmap(ubi, ai, fm); if (ret) { if (ret > 0)
ret = UBI_BAD_FASTMAP; goto free_hdr;
}
for (i = 0; i < used_blocks; i++) { struct ubi_wl_entry *e;
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL); if (!e) { while (i--)
kmem_cache_free(ubi_wl_entry_slab, fm->e[i]);
/* NOTE(review): extraction gap — success path and error labels partially missing below. */
ubi_free_vid_buf(vb);
kfree(ech);
out:
up_write(&ubi->fm_protect); if (ret == UBI_BAD_FASTMAP)
ubi_err(ubi, "Attach by fastmap failed, doing a full scan!"); return ret;
/*
 * NOTE(review): the lines below are the interior of a fastmap-writing
 * function whose header is missing from this extraction (it fills the
 * fastmap superblock, writes the data-volume VID headers, writes the
 * fastmap payload to the chosen PEBs, then installs @new_fm and runs the
 * seen-bitmap self-check). Code lines left untouched.
 */
fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
fmsb->version = UBI_FM_FMT_VERSION;
/* the max sqnum will be filled in while *reading* the fastmap */
fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
fmsb->sqnum = 0;
for (i = 1; i < new_fm->used_blocks; i++) {
dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
dvhdr->lnum = cpu_to_be32(i);
dbg_bld("writing fastmap data to PEB %i sqnum %llu",
new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvbuf); if (ret) {
ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum); goto out_free_seen;
}
}
for (i = 0; i < new_fm->used_blocks; i++) {
ret = ubi_io_write_data(ubi, fm_raw + (i * ubi->leb_size),
new_fm->e[i]->pnum, 0, ubi->leb_size); if (ret) {
ubi_err(ubi, "unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum); goto out_free_seen;
}
}
ubi_assert(new_fm);
ubi->fm = new_fm;
ret = self_check_seen(ubi, seen_pebs);
dbg_bld("fastmap written!");
/**
 * invalidate_fastmap - destroys a fastmap.
 * @ubi: UBI device object
 *
 * This function ensures that upon next UBI attach a full scan
 * is issued. We need this if UBI is about to write a new fastmap
 * but is unable to do so. In this case we have two options:
 * a) Make sure that the current fastmap will not be used upon
 * attach time and continue or b) fall back to RO mode to have the
 * current fastmap in a valid state.
 * Returns 0 on success, < 0 indicates an internal error.
 */
static int invalidate_fastmap(struct ubi_device *ubi)
{
	int ret;
	struct ubi_fastmap_layout *fm;
	struct ubi_wl_entry *e;
	struct ubi_vid_io_buf *vb = NULL;
	struct ubi_vid_hdr *vh;

	if (!ubi->fm)
		return 0;

	ubi->fm = NULL;

	ret = -ENOMEM;
	fm = kzalloc(sizeof(*fm), GFP_NOFS);
	if (!fm)
		goto out;

	vb = new_fm_vbuf(ubi, UBI_FM_SB_VOLUME_ID);
	if (!vb)
		goto out_free_fm;

	vh = ubi_get_vid_hdr(vb);
	ret = -ENOSPC;
	e = ubi_wl_get_fm_peb(ubi, 1);
	if (!e)
		goto out_free_fm;

	/*
	 * Create fake fastmap such that UBI will fall back
	 * to scanning mode.
	 */
	vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
	ret = ubi_io_write_vid_hdr(ubi, e->pnum, vb);
	if (ret < 0) {
		ubi_wl_put_fm_peb(ubi, e, 0, 0);
		goto out_free_fm;
	}

	fm->used_blocks = 1;
	fm->e[0] = e;

	ubi->fm = fm;

out:
	/* ubi_free_vid_buf() tolerates NULL, so the fm-alloc failure path is safe */
	ubi_free_vid_buf(vb);
	return ret;

out_free_fm:
	kfree(fm);
	goto out;
}
/**
 * return_fm_pebs - returns all PEBs used by a fastmap back to the
 * WL sub-system.
 * @ubi: UBI device object
 * @fm: fastmap layout object
 */
static void return_fm_pebs(struct ubi_device *ubi,
			   struct ubi_fastmap_layout *fm)
{
	int i;

	if (!fm)
		return;

	for (i = 0; i < fm->used_blocks; i++) {
		if (fm->e[i]) {
			ubi_wl_put_fm_peb(ubi, fm->e[i], i,
					  fm->to_be_tortured[i]);
			/* clear the slot so the entry cannot be returned twice */
			fm->e[i] = NULL;
		}
	}
}
/**
 * ubi_update_fastmap - will be called by UBI if a volume changes or
 * a fastmap pool becomes full.
 * @ubi: UBI device object
 *
 * Returns 0 on success, < 0 indicates an internal error.
 */
/*
 * NOTE(review): this body has extraction gaps — the locking/prologue
 * between the declarations and the size check, the anchor-PEB lookup that
 * sets @tmp_e before it is tested, and the whole function tail after the
 * anchor assignment are missing. Code lines left untouched.
 */
int ubi_update_fastmap(struct ubi_device *ubi)
{ int ret, i, j; struct ubi_fastmap_layout *new_fm, *old_fm; struct ubi_wl_entry *tmp_e;
if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
ubi_err(ubi, "fastmap too large");
ret = -ENOSPC; goto err;
}
for (i = 1; i < new_fm->used_blocks; i++) {
spin_lock(&ubi->wl_lock);
tmp_e = ubi_wl_get_fm_peb(ubi, 0);
spin_unlock(&ubi->wl_lock);
if (!tmp_e) { if (old_fm && old_fm->e[i]) {
ret = ubi_sync_erase(ubi, old_fm->e[i], 0); if (ret < 0) {
ubi_err(ubi, "could not erase old fastmap PEB");
for (j = 1; j < i; j++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[j],
j, 0);
new_fm->e[j] = NULL;
} goto err;
}
new_fm->e[i] = old_fm->e[i];
old_fm->e[i] = NULL;
} else {
ubi_err(ubi, "could not get any free erase block");
if (old_fm && old_fm->e[i]) {
ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
old_fm->to_be_tortured[i]);
old_fm->e[i] = NULL;
}
}
}
/* Old fastmap is larger than the new one */
if (old_fm && new_fm->used_blocks < old_fm->used_blocks) { for (i = new_fm->used_blocks; i < old_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
old_fm->to_be_tortured[i]);
old_fm->e[i] = NULL;
}
}
if (old_fm) { /* no fresh anchor PEB was found, reuse the old one */ if (!tmp_e) {
ret = ubi_sync_erase(ubi, old_fm->e[0], 0); if (ret < 0) {
ubi_err(ubi, "could not erase old anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[i],
i, 0);
new_fm->e[i] = NULL;
} goto err;
}
new_fm->e[0] = old_fm->e[0];
old_fm->e[0] = NULL;
} else { /* we've got a new anchor PEB, return the old one */
ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
old_fm->to_be_tortured[0]);
new_fm->e[0] = tmp_e;
old_fm->e[0] = NULL;
}
} else { if (!tmp_e) {
ubi_err(ubi, "could not find any anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++) {
ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
new_fm->e[i] = NULL;
}
ret = -ENOSPC; goto err;
}
new_fm->e[0] = tmp_e;
}
/*
 * NOTE(review): the following lines are stray German web-page boilerplate
 * (a content disclaimer) that leaked into this file during extraction; they
 * are not source code. Fenced in a comment so the file stays parseable;
 * safe to remove entirely. (Translation: "The information on this website
 * was carefully compiled to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax highlighting and the measurement
 * are still experimental.")
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */