/*
 * Translate an in-memory recovery-pass bitmask into the stable on-disk
 * numbering: for each set bit i, set the bit given by
 * passes_to_stable_map[i] in the result.
 */
u64 bch2_recovery_passes_to_stable(u64 v)
{
	u64 stable = 0;

	for (unsigned bit = 0; bit < ARRAY_SIZE(passes_to_stable_map); bit++) {
		if (v & BIT_ULL(bit))
			stable |= BIT_ULL(passes_to_stable_map[bit]);
	}

	return stable;
}
/*
 * Inverse of bch2_recovery_passes_to_stable(): translate a stable on-disk
 * recovery-pass bitmask back into the in-memory numbering via
 * passes_from_stable_map.
 */
u64 bch2_recovery_passes_from_stable(u64 v)
{
	u64 mem = 0;

	for (unsigned bit = 0; bit < ARRAY_SIZE(passes_from_stable_map); bit++) {
		if (v & BIT_ULL(bit))
			mem |= BIT_ULL(passes_from_stable_map[bit]);
	}

	return mem;
}
/* * Ratelimit if the last runtime was more than 1% of the time * since we last ran
*/
ret = (u64) le32_to_cpu(i->last_runtime) * 100 >
ktime_get_real_seconds() - le64_to_cpu(i->last_run);
if (BCH_RECOVERY_PASS_NO_RATELIMIT(i))
ret = false;
}
/* * After we go RW, the journal keys buffer can't be modified (except for * setting journal_key->overwritten: it will be accessed by multiple * threads
*/
move_gap(keys, keys->nr);
set_bit(BCH_FS_may_go_rw, &c->flags);
if (go_rw_in_recovery(c)) { if (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)) {
bch_info(c, "mounting a filesystem with no alloc info read-write; will recreate");
bch2_reconstruct_alloc(c);
}
return bch2_fs_read_write_early(c);
} return 0;
}
/* * Make sure root inode is readable while we're still in recovery and can rewind * for repair:
*/ staticint bch2_lookup_root_inode(struct bch_fs *c)
{
subvol_inum inum = BCACHEFS_ROOT_SUBVOL_INUM; struct bch_inode_unpacked inode_u; struct bch_subvolume subvol;
/* * Never run scan_for_btree_nodes persistently: check_topology will run * it if required
*/ if (pass == BCH_RECOVERY_PASS_scan_for_btree_nodes)
*flags |= RUN_RECOVERY_PASS_nopersistent;
if ((*flags & RUN_RECOVERY_PASS_ratelimit) &&
!bch2_recovery_pass_want_ratelimit(c, pass))
*flags &= ~RUN_RECOVERY_PASS_ratelimit;
/* * If RUN_RECOVERY_PASS_nopersistent is set, we don't want to do * anything if the pass has already run: these mean we need a prior pass * to run before we continue to repair, we don't expect that pass to fix * the damage we encountered. * * Otherwise, we run run_explicit_recovery_pass when we find damage, so * it should run again even if it's already run:
*/ bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags); bool persistent = !in_recovery || !(*flags & RUN_RECOVERY_PASS_nopersistent); bool rewind = in_recovery &&
r->curr_pass > pass &&
!(r->passes_complete & BIT_ULL(pass));
if (!(*flags & RUN_RECOVERY_PASS_ratelimit) &&
(r->passes_ratelimiting & BIT_ULL(pass))) returntrue;
if (rewind) returntrue;
returnfalse;
}
/* * For when we need to rewind recovery passes and run a pass we skipped:
*/ int __bch2_run_explicit_recovery_pass(struct bch_fs *c, struct printbuf *out, enum bch_recovery_pass pass, enum bch_run_recovery_pass_flags flags)
{ struct bch_fs_recovery *r = &c->recovery; int ret = 0;
if (!(p->when & PASS_SILENT))
bch2_print(c, KERN_INFO bch2_log_msg(c, "%s..."),
bch2_recovery_passes[pass]);
s64 start_time = ktime_get_real_seconds(); int ret = p->fn(c);
r->passes_to_run &= ~BIT_ULL(pass);
if (ret) {
r->passes_failing |= BIT_ULL(pass); return ret;
}
r->passes_failing = 0;
if (!test_bit(BCH_FS_error, &c->flags))
bch2_sb_recovery_pass_complete(c, pass, start_time);
if (!(p->when & PASS_SILENT))
bch2_print(c, KERN_CONT " done\n");
return 0;
}
staticint __bch2_run_recovery_passes(struct bch_fs *c, u64 orig_passes_to_run, bool online)
{ struct bch_fs_recovery *r = &c->recovery; int ret = 0;
spin_lock_irq(&r->lock);
if (online)
orig_passes_to_run &= bch2_recovery_passes_match(PASS_ONLINE);
if (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info))
orig_passes_to_run &= ~bch2_recovery_passes_match(PASS_ALLOC);
/* * A failed recovery pass will be retried after another pass succeeds - * but not this iteration. * * This is because some passes depend on repair done by other passes: we * may want to retry, but we don't want to loop on failing passes.
*/
if (c->opts.recovery_pass_last)
passes &= BIT_ULL(c->opts.recovery_pass_last + 1) - 1;
/* * We can't allow set_may_go_rw to be excluded; that would cause us to * use the journal replay keys for updates where it's not expected.
*/
c->opts.recovery_passes_exclude &= ~BCH_RECOVERY_PASS_set_may_go_rw;
passes &= ~c->opts.recovery_passes_exclude;
passes &= ~(BIT_ULL(from) - 1);
down(&c->recovery.run_lock); int ret = __bch2_run_recovery_passes(c, passes, false);
up(&c->recovery.run_lock);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.