if (fsck_err_on(!g->gen_valid,
		trans, ptr_to_missing_alloc_key,
		"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
		"while marking %s",
		p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
		bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
		p.ptr.gen,
		(printbuf_reset(&buf),
		 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
	if (!p.ptr.cached) {
		g->gen_valid = true;
		g->gen = p.ptr.gen;
	} else {
		/* this pointer will be dropped */
		*do_update = true;
		goto out;
	}
}
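/*
 * Repair note: when the alloc btree has no record of this bucket, a
 * dirty pointer is treated as the authority and its gen is copied into
 * the in-memory bucket state; a cached pointer carries no required
 * data, so it is simply flagged for removal via *do_update.
 */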
/* g->gen_valid == true */

if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
		trans, ptr_gen_newer_than_bucket_gen,
		"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
		"while marking %s",
		p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
		bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
		p.ptr.gen, g->gen,
		(printbuf_reset(&buf),
		 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
	if (!p.ptr.cached &&
	    (g->data_type != BCH_DATA_btree ||
	     data_type == BCH_DATA_btree)) {
		g->data_type = data_type;
		g->stripe_sectors = 0;
		g->dirty_sectors = 0;
		g->cached_sectors = 0;
	}

	*do_update = true;
}
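/*
 * Note: the counter reset above is skipped when it would let non-btree
 * data clobber a bucket currently marked as btree; presumably the
 * btree bucket state is the more trustworthy side, so in that case
 * only the key is flagged for update.
 */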
if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
		trans, ptr_gen_newer_than_bucket_gen,
		"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
		"while marking %s",
		p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
		bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
		p.ptr.gen,
		(printbuf_reset(&buf),
		 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
	*do_update = true;
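/*
 * Note on the check above: bucket gens live in a small field and wrap
 * around, so a pointer more than BUCKET_GC_GEN_MAX generations behind
 * its bucket can no longer be aged safely and must be rewritten
 * (assuming the usual bcachefs gen-wraparound scheme).
 */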
if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
		trans, stale_dirty_ptr,
		"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
		"while marking %s",
		p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
		bch2_data_type_str(ptr_data_type(k.k, &p.ptr)),
		p.ptr.gen, g->gen,
		(printbuf_reset(&buf),
		 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
	*do_update = true;
if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
	goto out;
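/*
 * A non-btree pointer whose gen doesn't match the bucket is stale and
 * was already handled above, so the check that follows only applies to
 * live pointers (and to btree pointers, which are checked regardless
 * of gen).
 */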
if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
		trans, ptr_bucket_data_type_mismatch,
		"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
		"while marking %s",
		p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
		bch2_data_type_str(g->data_type),
		bch2_data_type_str(data_type),
		(printbuf_reset(&buf),
		 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
	if (!p.ptr.cached &&
	    data_type == BCH_DATA_btree) {
		switch (g->data_type) {
		case BCH_DATA_sb:
			bch_err(c, "btree and superblock in the same bucket - cannot repair");
			ret = bch_err_throw(c, fsck_repair_unimplemented);
			goto out;
		case BCH_DATA_journal:
			ret = bch2_dev_journal_bucket_delete(ca, PTR_BUCKET_NR(ca, &p.ptr));
			bch_err_msg(c, ret, "error deleting journal bucket %zu",
				    PTR_BUCKET_NR(ca, &p.ptr));
			if (ret)
				goto out;
			break;
		}
bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
	if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
		goto found;
next_ptr = NULL;
found:
if (!next_ptr) {
	bch_err(c, "aieee, found stripe ptr with no data ptr");
	continue;
}
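/*
 * Sanity check: in a well-formed extent a stripe pointer entry is
 * accompanied by at least one data pointer entry; if none is found the
 * entry is skipped with a warning rather than repaired.
 */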
int ret = bch2_run_explicit_recovery_pass(c, buf,
			BCH_RECOVERY_PASS_check_allocations, 0);

if (insert) {
	bch2_trans_updates_to_text(buf, trans);
	__bch2_inconsistent_error(c, buf);
	/*
	 * If we're in recovery, run_explicit_recovery_pass might give
	 * us an error code for rewinding recovery
	 */
	if (!ret)
		ret = bch_err_throw(c, bucket_ref_update);
} else {
	/* Always ignore overwrite errors, so that deletion works */
	ret = 0;
}

if (print || insert)
	bch2_print_str(c, KERN_ERR, buf->buf);
return ret;
}
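/*
 * The checks below appear to be the runtime counterparts of the fsck
 * checks above: instead of repairing in place, they log the
 * inconsistency through bucket_ref_update_err() and fail the update
 * when inserting.
 */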
if (unlikely(gen_after(ptr->gen, b_gen))) {
	bch2_log_msg_start(c, &buf);
	prt_printf(&buf, "bucket %u:%zu gen %u data type %s: ptr gen %u newer than bucket gen",
		   ptr->dev, bucket_nr, b_gen,
		   bch2_data_type_str(bucket_data_type ?: ptr_data_type),
		   ptr->gen);
	ret = bucket_ref_update_err(trans, &buf, k, inserting,
				    BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen);
	goto out;
}
if (unlikely(gen_cmp(b_gen, ptr->gen) > BUCKET_GC_GEN_MAX)) {
	bch2_log_msg_start(c, &buf);
	prt_printf(&buf, "bucket %u:%zu gen %u data type %s: ptr gen %u too stale",
		   ptr->dev, bucket_nr, b_gen,
		   bch2_data_type_str(bucket_data_type ?: ptr_data_type),
		   ptr->gen);
	ret = bucket_ref_update_err(trans, &buf, k, inserting,
				    BCH_FSCK_ERR_ptr_too_stale);
	goto out;
}
if (b_gen != ptr->gen && ptr->cached) {
	ret = 1;
	goto out;
}
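/*
 * A stale pointer is only an error when it's dirty: a cached pointer
 * going stale just means the bucket was reused, so ret = 1 above lets
 * the caller drop the reference without treating it as corruption.
 */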
if (unlikely(b_gen != ptr->gen)) {
	bch2_log_msg_start(c, &buf);
	prt_printf(&buf, "bucket %u:%zu gen %u (mem gen %u) data type %s: stale dirty ptr (gen %u)",
		   ptr->dev, bucket_nr, b_gen,
		   bucket_gen_get(ca, bucket_nr),
		   bch2_data_type_str(bucket_data_type ?: ptr_data_type),
		   ptr->gen);
	ret = bucket_ref_update_err(trans, &buf, k, inserting,
				    BCH_FSCK_ERR_stale_dirty_ptr);
	goto out;
}
if (unlikely(bucket_data_type_mismatch(bucket_data_type, ptr_data_type))) {
	bch2_log_msg_start(c, &buf);
	prt_printf(&buf, "bucket %u:%zu gen %u different types of data in same bucket: %s, %s",
		   ptr->dev, bucket_nr, b_gen,
		   bch2_data_type_str(bucket_data_type),
		   bch2_data_type_str(ptr_data_type));
	ret = bucket_ref_update_err(trans, &buf, k, inserting,
				    BCH_FSCK_ERR_ptr_bucket_data_type_mismatch);
	goto out;
}
if (unlikely((u64) *bucket_sectors + sectors > U32_MAX)) {
	bch2_log_msg_start(c, &buf);
	prt_printf(&buf, "bucket %u:%zu gen %u data type %s sector count overflow: %u + %lli > U32_MAX",
		   ptr->dev, bucket_nr, b_gen,
		   bch2_data_type_str(bucket_data_type ?: ptr_data_type),
		   *bucket_sectors, sectors);

/*
 * Not allowed to reduce sectors_available except by getting a
 * reservation:
 */
s64 should_not_have_added = added - (s64) disk_res_sectors;
if (unlikely(should_not_have_added > 0)) {
	u64 old, new;

	old = atomic64_read(&c->sectors_available);
	do {
		new = max_t(s64, 0, old - should_not_have_added);
	} while (!atomic64_try_cmpxchg(&c->sectors_available,
				       &old, new));
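	/*
	 * The try_cmpxchg loop above atomically clamps c->sectors_available:
	 * on a failed exchange, atomic64_try_cmpxchg() refreshes 'old' and
	 * the new value is recomputed, so concurrent updaters never push the
	 * counter below zero.
	 */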
if (p.ptr.cached) {
	ret = bch2_mod_dev_cached_sectors(trans, p.ptr.dev, disk_sectors, gc);
	if (ret)
		return ret;
} else if (!p.has_ec) {
	replicas_sectors += disk_sectors;
	replicas_entry_add_dev(&acc_replicas_key.replicas, p.ptr.dev);
} else {
	ret = bch2_trigger_stripe_ptr(trans, k, p, data_type, disk_sectors, flags);
	if (ret)
		return ret;

	/*
	 * There may be other dirty pointers in this extent, but
	 * if so they're not required for mounting if we have an
	 * erasure coded pointer in this extent:
	 */
	acc_replicas_key.replicas.nr_required = 0;
}

if (cur_compression_type &&
    cur_compression_type != p.crc.compression_type) {
	if (flags & BTREE_TRIGGER_overwrite)
		bch2_u64s_neg(compression_acct, ARRAY_SIZE(compression_acct));

	ret = bch2_disk_accounting_mod2(trans, gc, compression_acct,
					compression, cur_compression_type);
	if (ret)
		return ret;
if (unlikely(flags & BTREE_TRIGGER_check_repair))
	return bch2_check_fix_ptrs(trans, btree, level, new.s_c, flags);

/* if pointers aren't changing - nothing to do: */
if (new_ptrs_bytes == old_ptrs_bytes &&
    !memcmp(new_ptrs.start,
	    old_ptrs.start,
	    new_ptrs_bytes))
	return 0;

if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
	if (old.k->type) {
		int ret = __trigger_extent(trans, btree, level, old,
					   flags & ~BTREE_TRIGGER_insert);
		if (ret)
			return ret;
	}

	if (new.k->type) {
		int ret = __trigger_extent(trans, btree, level, new.s_c,
					   flags & ~BTREE_TRIGGER_overwrite);
		if (ret)
			return ret;
	}
	int need_rebalance_delta = 0;
	s64 need_rebalance_sectors_delta[1] = { 0 };

	s64 s = bch2_bkey_sectors_need_rebalance(c, old);
	need_rebalance_delta -= s != 0;
	need_rebalance_sectors_delta[0] -= s;

	s = bch2_bkey_sectors_need_rebalance(c, new.s_c);
	need_rebalance_delta += s != 0;
	need_rebalance_sectors_delta[0] += s;
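	/*
	 * need_rebalance_delta tracks whether this extent is entering or
	 * leaving the set of keys needing rebalance (-1, 0 or +1), and
	 * need_rebalance_sectors_delta the corresponding change in sectors;
	 * they drive the rebalance_work btree bit and the disk accounting
	 * updates below.
	 */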
	if ((flags & BTREE_TRIGGER_transactional) && need_rebalance_delta) {
		int ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_rebalance_work,
						      new.k->p, need_rebalance_delta > 0);
		if (ret)
			return ret;
	}

	if (need_rebalance_sectors_delta[0]) {
		int ret = bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
						    need_rebalance_sectors_delta, rebalance_work);
		if (ret)
			return ret;
	}
}
if (a->v.data_type && type && a->v.data_type != type) {
	struct printbuf buf = PRINTBUF;
	bch2_log_msg_start(c, &buf);
	prt_printf(&buf, "bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
		   "while marking %s\n",
		   iter.pos.inode, iter.pos.offset, a->v.gen,
		   bch2_data_type_str(a->v.data_type),
		   bch2_data_type_str(type),
		   bch2_data_type_str(type));
struct bucket *g = gc_bucket(ca, b);
if (bch2_fs_inconsistent_on(!g, c,
			    "reference to invalid bucket on device %u when marking metadata type %s",
			    ca->dev_idx, bch2_data_type_str(data_type)))
	goto err;

bucket_lock(g);
struct bch_alloc_v4 old = bucket_m_to_alloc(*g);

if (bch2_fs_inconsistent_on(g->data_type &&
			    g->data_type != data_type, c,
			    "different types of data in same bucket: %s, %s",
			    bch2_data_type_str(g->data_type),
			    bch2_data_type_str(data_type)))
	goto err_unlock;

if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
			    "bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
			    ca->dev_idx, b, g->gen,
			    bch2_data_type_str(g->data_type ?: data_type),
			    g->dirty_sectors, sectors))
	goto err_unlock;

g->data_type = data_type;
g->dirty_sectors += sectors;
struct bch_alloc_v4 new = bucket_m_to_alloc(*g);
bucket_unlock(g);

ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
return ret;
err_unlock:
bucket_unlock(g);
err:
return bch_err_throw(c, metadata_bucket_inconsistency);
}
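/*
 * Mark a superblock/journal/free bucket, dispatching on the trigger
 * flags: the gc path updates the in-memory bucket directly (see
 * bch2_mark_metadata_bucket above), while the transactional path
 * commits the update through the btree.
 */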
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
				    struct bch_dev *ca, u64 b,
				    enum bch_data_type type, unsigned sectors,
				    enum btree_iter_update_trigger_flags flags)
{
	BUG_ON(type != BCH_DATA_free &&
	       type != BCH_DATA_sb &&
	       type != BCH_DATA_journal);

	/*
	 * Backup superblock might be past the end of our normal usable space:
	 */
	if (b >= ca->mi.nbuckets)
		return 0;

	if (flags & BTREE_TRIGGER_gc)
		return bch2_mark_metadata_bucket(trans, ca, b, type, sectors, flags);
	else if (flags & BTREE_TRIGGER_transactional)
		return commit_do(trans, NULL, NULL, 0,
				 __bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
	else
		BUG();
}
if (b != *bucket && *bucket_sectors) {
	int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
						  type, *bucket_sectors, flags);
	if (ret)
		return ret;

u64 bucket = 0;
unsigned i, bucket_sectors = 0;
int ret;

for (i = 0; i < layout.nr_superblocks; i++) {
	u64 offset = le64_to_cpu(layout.sb_offset[i]);

	if (offset == BCH_SB_SECTOR) {
		ret = bch2_trans_mark_metadata_sectors(trans, ca,
					0, BCH_SB_SECTOR,
					BCH_DATA_sb, &bucket, &bucket_sectors, flags);
		if (ret)
			return ret;
	}

	ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
				offset + (1 << layout.sb_max_size_bits),
				BCH_DATA_sb, &bucket, &bucket_sectors, flags);
	if (ret)
		return ret;
}
if (bucket_sectors) {
	ret = bch2_trans_mark_metadata_bucket(trans, ca,
				bucket, BCH_DATA_sb, bucket_sectors, flags);
	if (ret)
		return ret;
}
for (i = 0; i < ca->journal.nr; i++) {
	ret = bch2_trans_mark_metadata_bucket(trans, ca,
				ca->journal.buckets[i],
				BCH_DATA_journal, ca->mi.bucket_size, flags);
	if (ret)
		return ret;
}
return 0;
}
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
			   enum btree_iter_update_trigger_flags flags)
{
	int ret = bch2_trans_run(c,
		__bch2_trans_mark_dev_sb(trans, ca, flags));
	bch_err_fn(c, ret);
	return ret;
}
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
				  enum btree_iter_update_trigger_flags flags)
{
	for_each_online_member(c, ca, BCH_DEV_READ_REF_trans_mark_dev_sbs) {
		int ret = bch2_trans_mark_dev_sb(c, ca, flags);
		if (ret) {
			enumerated_ref_put(&ca->io_ref[READ],
					   BCH_DEV_READ_REF_trans_mark_dev_sbs);
			return ret;
		}
	}

	return 0;
}
int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
	return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
}