/*
 * This is only the first step towards a full-featured scrub. It reads all
 * extents and super blocks and verifies the checksums. In case a bad checksum
 * is found or the extent cannot be read, good data will be written back if
 * any can be found.
 *
 * Future enhancements:
 * - In case an unrepairable extent is encountered, track which files are
 *   affected and report them
 * - track and record media errors, throw out bad devices
 * - add a mode to also read unallocated space
 */
struct scrub_ctx;

/*
 * The following value only influences the performance.
 *
 * This determines how many stripes would be submitted in one go,
 * which is 512KiB (BTRFS_STRIPE_LEN * SCRUB_STRIPES_PER_GROUP).
 */
#define SCRUB_STRIPES_PER_GROUP		8

/*
 * How many groups we have for each sctx.
 *
 * This would be 8M per device, the same value as the old scrub in-flight bios
 * size limit.
 */
#define SCRUB_GROUPS_PER_SCTX		16

/*
 * The following value times PAGE_SIZE needs to be large enough to match the
 * largest node/leaf/sector size that shall be supported.
 */
#define SCRUB_MAX_SECTORS_PER_BLOCK	(BTRFS_MAX_METADATA_BLOCKSIZE / SZ_4K)
/* Represent one sector and its needed info to verify the content. */
struct scrub_sector_verification {
	union {
		/*
		 * Csum pointer for data csum verification. Should point to a
		 * sector csum inside scrub_stripe::csums.
		 *
		 * NULL if this data sector has no csum.
		 */
		u8 *csum;

		/*
		 * Extra info for metadata verification. All sectors inside a
		 * tree block share the same generation.
		 */
		u64 generation;
	};
};
enum scrub_stripe_flags {
	/* Set when @mirror_num, @dev, @physical and @logical are set. */
	SCRUB_STRIPE_FLAG_INITIALIZED,

	/* Set when the read-repair is finished. */
	SCRUB_STRIPE_FLAG_REPAIR_DONE,

	/*
	 * Set for data stripes if it's triggered from P/Q stripe.
	 * During such scrub, we should not report errors in data stripes, nor
	 * update the accounting.
	 */
	SCRUB_STRIPE_FLAG_NO_REPORT,
};
/*
 * We have multiple bitmaps for one scrub_stripe.
 * However each bitmap has at most (BTRFS_STRIPE_LEN / blocksize) bits,
 * which is normally 16, and much smaller than BITS_PER_LONG (32 or 64).
 *
 * So to reduce memory usage for each scrub_stripe, we pack those bitmaps
 * into a larger one.
 *
 * This enum records where the sub-bitmaps are inside the larger one.
 * Each sub-bitmap starts at scrub_bitmap_nr_##name * nr_sectors bit.
 */
enum {
	/* Which blocks are covered by extent items. */
	scrub_bitmap_nr_has_extent = 0,

	/* Which blocks are metadata. */
	scrub_bitmap_nr_is_metadata,

	/*
	 * Which blocks have errors, including IO, csum, and metadata
	 * errors.
	 * This sub-bitmap is the OR results of the next few error related
	 * sub-bitmaps.
	 */
	scrub_bitmap_nr_error,
	scrub_bitmap_nr_io_error,
	scrub_bitmap_nr_csum_error,
	scrub_bitmap_nr_meta_error,
	scrub_bitmap_nr_meta_gen_error,
	scrub_bitmap_nr_last,
};
/* * Indicate the states of the stripe. Bits are defined in * scrub_stripe_flags enum.
*/ unsignedlong state;
/* The large bitmap contains all the sub-bitmaps. */ unsignedlong bitmaps[BITS_TO_LONGS(scrub_bitmap_nr_last *
(BTRFS_STRIPE_LEN / BTRFS_MIN_BLOCKSIZE))];
/* * For writeback (repair or replace) error reporting. * This one is protected by a spinlock, thus can not be packed into * the larger bitmap.
*/ unsignedlong write_error_bitmap;
/* Writeback can be concurrent, thus we need to protect the bitmap. */
spinlock_t write_error_lock;
/* * Checksum for the whole stripe if this stripe is inside a data block * group.
*/
u8 *csums;
struct work_struct work;
};
struct scrub_ctx {
	struct scrub_stripe	stripes[SCRUB_TOTAL_STRIPES];
	struct scrub_stripe	*raid56_data_stripes;
	struct btrfs_fs_info	*fs_info;
	struct btrfs_path	extent_path;
	struct btrfs_path	csum_path;
	int			first_free;
	int			cur_stripe;
	atomic_t		cancel_req;
	int			readonly;

	/* State of IO submission throttling affecting the associated device. */
	ktime_t			throttle_deadline;
	u64			throttle_sent;

	/*
	 * Use a ref counter to avoid use-after-free issues. Scrub workers
	 * decrement bios_in_flight and workers_pending and then do a wakeup
	 * on the list_wait wait queue. We must ensure the main scrub task
	 * doesn't free the scrub context before or while the workers are
	 * doing the wakeup() call.
	 */
	refcount_t		refs;
};
/* * init_path might indirectly call vmalloc, or use GFP_KERNEL. Scrub * uses GFP_NOFS in this context, so we keep it consistent but it does * not seem to be strictly necessary.
*/
nofs_flag = memalloc_nofs_save();
ipath = init_ipath(4096, local_root, swarn->path);
memalloc_nofs_restore(nofs_flag); if (IS_ERR(ipath)) {
btrfs_put_root(local_root);
ret = PTR_ERR(ipath);
ipath = NULL; goto err;
}
ret = paths_from_inode(inum, ipath);
if (ret < 0) goto err;
/* * we deliberately ignore the bit ipath might have been too small to * hold all of the paths here
*/ for (i = 0; i < ipath->fspath->elem_cnt; ++i)
btrfs_warn(fs_info, "scrub: %s at logical %llu on dev %s, physical %llu root %llu inode %llu offset %llu length %u links %u (path: %s)",
swarn->errstr, swarn->logical,
btrfs_dev_name(swarn->dev),
swarn->physical,
root, inum, offset,
fs_info->sectorsize, nlink,
(char *)(unsignedlong)ipath->fspath->val[i]);
/* Super block error, no need to search extent tree. */ if (is_super) {
btrfs_warn(fs_info, "scrub: %s on device %s, physical %llu",
errstr, btrfs_dev_name(dev), physical); return;
}
path = btrfs_alloc_path(); if (!path) return;
/* stripe->pages[] is allocated by us and no highmem is allowed. */
ASSERT(page);
ASSERT(!PageHighMem(page)); return page_address(page) + offset_in_page(offset);
}
/* * Here we don't have a good way to attach the pages (and subpages) * to a dummy extent buffer, thus we have to directly grab the members * from pages.
*/
memcpy(on_disk_csum, header->csum, fs_info->csum_size);
if (logical != btrfs_stack_header_bytenr(header)) {
scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info, "scrub: tree block %llu mirror %u has bad bytenr, has %llu want %llu",
logical, stripe->mirror_num,
btrfs_stack_header_bytenr(header), logical); return;
} if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
BTRFS_FSID_SIZE) != 0) {
scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info, "scrub: tree block %llu mirror %u has bad fsid, has %pU want %pU",
logical, stripe->mirror_num,
header->fsid, fs_info->fs_devices->fsid); return;
} if (memcmp(header->chunk_tree_uuid, fs_info->chunk_tree_uuid,
BTRFS_UUID_SIZE) != 0) {
scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info, "scrub: tree block %llu mirror %u has bad chunk tree uuid, has %pU want %pU",
logical, stripe->mirror_num,
header->chunk_tree_uuid, fs_info->chunk_tree_uuid); return;
}
/* Now check tree block csum. */
shash->tfm = fs_info->csum_shash;
crypto_shash_init(shash);
crypto_shash_update(shash, first_kaddr + BTRFS_CSUM_SIZE,
fs_info->sectorsize - BTRFS_CSUM_SIZE);
for (int i = sector_nr + 1; i < sector_nr + sectors_per_tree; i++) {
crypto_shash_update(shash, scrub_stripe_get_kaddr(stripe, i),
fs_info->sectorsize);
}
crypto_shash_final(shash, calculated_csum); if (memcmp(calculated_csum, on_disk_csum, fs_info->csum_size) != 0) {
scrub_bitmap_set_meta_error(stripe, sector_nr, sectors_per_tree);
scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info, "scrub: tree block %llu mirror %u has bad csum, has " CSUM_FMT " want " CSUM_FMT,
logical, stripe->mirror_num,
CSUM_FMT_VALUE(fs_info->csum_size, on_disk_csum),
CSUM_FMT_VALUE(fs_info->csum_size, calculated_csum)); return;
} if (stripe->sectors[sector_nr].generation !=
btrfs_stack_header_generation(header)) {
scrub_bitmap_set_meta_gen_error(stripe, sector_nr, sectors_per_tree);
scrub_bitmap_set_error(stripe, sector_nr, sectors_per_tree);
btrfs_warn_rl(fs_info, "scrub: tree block %llu mirror %u has bad generation, has %llu want %llu",
logical, stripe->mirror_num,
btrfs_stack_header_generation(header),
stripe->sectors[sector_nr].generation); return;
}
scrub_bitmap_clear_error(stripe, sector_nr, sectors_per_tree);
scrub_bitmap_clear_csum_error(stripe, sector_nr, sectors_per_tree);
scrub_bitmap_clear_meta_error(stripe, sector_nr, sectors_per_tree);
scrub_bitmap_clear_meta_gen_error(stripe, sector_nr, sectors_per_tree);
}
/* Sector not utilized, skip it. */ if (!scrub_bitmap_test_bit_has_extent(stripe, sector_nr)) return;
/* IO error, no need to check. */ if (scrub_bitmap_test_bit_io_error(stripe, sector_nr)) return;
/* Metadata, verify the full tree block. */ if (scrub_bitmap_test_bit_is_metadata(stripe, sector_nr)) { /* * Check if the tree block crosses the stripe boundary. If * crossed the boundary, we cannot verify it but only give a * warning. * * This can only happen on a very old filesystem where chunks * are not ensured to be stripe aligned.
*/ if (unlikely(sector_nr + sectors_per_tree > stripe->nr_sectors)) {
btrfs_warn_rl(fs_info, "scrub: tree block at %llu crosses stripe boundary %llu",
stripe->logical +
(sector_nr << fs_info->sectorsize_bits),
stripe->logical); return;
}
scrub_verify_one_metadata(stripe, sector_nr); return;
}
/* * Data is easier, we just verify the data csum (if we have it). For * cases without csum, we have no other choice but to trust it.
*/ if (!sector->csum) {
scrub_bitmap_clear_bit_error(stripe, sector_nr); return;
}
/*
 * Return the sector number of @first_bvec inside the stripe.
 *
 * Walks the stripe's sectors and finds the one whose kernel address
 * matches the first bio vector; the bvec must map into the stripe.
 */
static int calc_sector_number(struct scrub_stripe *stripe, struct bio_vec *first_bvec)
{
	int i;

	for (i = 0; i < stripe->nr_sectors; i++) {
		if (scrub_stripe_get_kaddr(stripe, i) == bvec_virt(first_bvec))
			break;
	}
	/* The bvec must belong to this stripe. */
	ASSERT(i < stripe->nr_sectors);
	return i;
}
/*
 * Repair read is different to the regular read:
 *
 * - Only reads the failed sectors
 * - May have extra blocksize limits
 */
static void scrub_repair_read_endio(struct btrfs_bio *bbio)
{
	struct scrub_stripe *stripe = bbio->private;
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct bio_vec *bvec;
	int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
	u32 bio_size = 0;
	int i;
/* Add one sector of the stripe's page cache to @bbio. */
static void scrub_bio_add_sector(struct btrfs_bio *bbio, struct scrub_stripe *stripe,
				 int sector_nr)
{
	void *kaddr = scrub_stripe_get_kaddr(stripe, sector_nr);
	int ret;

	ret = bio_add_page(&bbio->bio, virt_to_page(kaddr), bbio->fs_info->sectorsize,
			   offset_in_page(kaddr));
	/*
	 * Caller should ensure the bbio has enough size.
	 * And we cannot use __bio_add_page(), which doesn't do any merge.
	 *
	 * Meanwhile for scrub_submit_initial_read() we fully rely on the merge
	 * to create the minimal amount of bio vectors, for fs block size < page
	 * size cases.
	 */
	ASSERT(ret == bbio->fs_info->sectorsize);
}
static void scrub_stripe_submit_repair_read(struct scrub_stripe *stripe,
					    int mirror, int blocksize, bool wait)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	const unsigned long old_error_bitmap = scrub_bitmap_read_error(stripe);
	int i;
if (test_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state)) return;
/* * Init needed infos for error reporting. * * Although our scrub_stripe infrastructure is mostly based on btrfs_submit_bio() * thus no need for dev/physical, error reporting still needs dev and physical.
*/ if (!bitmap_empty(&errors->init_error_bitmap, stripe->nr_sectors)) {
u64 mapped_len = fs_info->sectorsize; struct btrfs_io_context *bioc = NULL; int stripe_index = stripe->mirror_num - 1; int ret;
/* For scrub, our mirror_num should always start at 1. */
ASSERT(stripe->mirror_num >= 1);
ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
stripe->logical, &mapped_len, &bioc,
NULL, NULL); /* * If we failed, dev will be NULL, and later detailed reports * will just be skipped.
*/ if (ret < 0) goto skip;
physical = bioc->stripes[stripe_index].physical;
dev = bioc->stripes[stripe_index].dev;
btrfs_put_bioc(bioc);
}
/* Good sector from the beginning, nothing need to be done. */ if (!test_bit(sector_nr, &errors->init_error_bitmap)) continue;
/* * Report error for the corrupted sectors. If repaired, just * output the message of repaired message.
*/ if (repaired) { if (dev) {
btrfs_err_rl(fs_info, "scrub: fixed up error at logical %llu on dev %s physical %llu",
stripe->logical, btrfs_dev_name(dev),
physical);
} else {
btrfs_err_rl(fs_info, "scrub: fixed up error at logical %llu on mirror %u",
stripe->logical, stripe->mirror_num);
} continue;
}
/* The remaining are all for unrepaired. */ if (dev) {
btrfs_err_rl(fs_info, "scrub: unable to fixup (regular) error at logical %llu on dev %s physical %llu",
stripe->logical, btrfs_dev_name(dev),
physical);
} else {
btrfs_err_rl(fs_info, "scrub: unable to fixup (regular) error at logical %llu on mirror %u",
stripe->logical, stripe->mirror_num);
}
if (scrub_bitmap_test_bit_io_error(stripe, sector_nr)) if (__ratelimit(&rs) && dev)
scrub_print_common_warning("i/o error", dev, false,
stripe->logical, physical); if (scrub_bitmap_test_bit_csum_error(stripe, sector_nr)) if (__ratelimit(&rs) && dev)
scrub_print_common_warning("checksum error", dev, false,
stripe->logical, physical); if (scrub_bitmap_test_bit_meta_error(stripe, sector_nr)) if (__ratelimit(&rs) && dev)
scrub_print_common_warning("header error", dev, false,
stripe->logical, physical); if (scrub_bitmap_test_bit_meta_gen_error(stripe, sector_nr)) if (__ratelimit(&rs) && dev)
scrub_print_common_warning("generation error", dev, false,
stripe->logical, physical);
}
/* Update the device stats. */ for (int i = 0; i < errors->nr_io_errors; i++)
btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_READ_ERRS); for (int i = 0; i < errors->nr_csum_errors; i++)
btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_CORRUPTION_ERRS); /* Generation mismatch error is based on each metadata, not each block. */ for (int i = 0; i < errors->nr_meta_gen_errors;
i += (fs_info->nodesize >> fs_info->sectorsize_bits))
btrfs_dev_stat_inc_and_print(stripe->dev, BTRFS_DEV_STAT_GENERATION_ERRS);
/* * The main entrance for all read related scrub work, including: * * - Wait for the initial read to finish * - Verify and locate any bad sectors * - Go through the remaining mirrors and try to read as large blocksize as * possible * - Go through all mirrors (including the failed mirror) sector-by-sector * - Submit writeback for repaired sectors * * Writeback for dev-replace does not happen here, it needs extra * synchronization for zoned devices.
*/ staticvoid scrub_stripe_read_repair_worker(struct work_struct *work)
{ struct scrub_stripe *stripe = container_of(work, struct scrub_stripe, work); struct scrub_ctx *sctx = stripe->sctx; struct btrfs_fs_info *fs_info = sctx->fs_info; struct scrub_error_records errors = { 0 }; int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
stripe->bg->length); unsignedlong repaired; unsignedlong error; int mirror; int i;
ASSERT(stripe->mirror_num > 0);
wait_scrub_stripe_io(stripe);
scrub_verify_one_stripe(stripe, scrub_bitmap_read_has_extent(stripe)); /* Save the initial failed bitmap for later repair and report usage. */
errors.init_error_bitmap = scrub_bitmap_read_error(stripe);
errors.nr_io_errors = scrub_bitmap_weight_io_error(stripe);
errors.nr_csum_errors = scrub_bitmap_weight_csum_error(stripe);
errors.nr_meta_errors = scrub_bitmap_weight_meta_error(stripe);
errors.nr_meta_gen_errors = scrub_bitmap_weight_meta_gen_error(stripe);
if (bitmap_empty(&errors.init_error_bitmap, stripe->nr_sectors)) goto out;
/* * Try all remaining mirrors. * * Here we still try to read as large block as possible, as this is * faster and we have extra safety nets to rely on.
*/ for (mirror = calc_next_mirror(stripe->mirror_num, num_copies);
mirror != stripe->mirror_num;
mirror = calc_next_mirror(mirror, num_copies)) { constunsignedlong old_error_bitmap = scrub_bitmap_read_error(stripe);
/* * Last safety net, try re-checking all mirrors, including the failed * one, sector-by-sector. * * As if one sector failed the drive's internal csum, the whole read * containing the offending sector would be marked as error. * Thus here we do sector-by-sector read. * * This can be slow, thus we only try it as the last resort.
*/
for (i = 0, mirror = stripe->mirror_num;
i < num_copies;
i++, mirror = calc_next_mirror(mirror, num_copies)) { constunsignedlong old_error_bitmap = scrub_bitmap_read_error(stripe);
scrub_stripe_submit_repair_read(stripe, mirror,
fs_info->sectorsize, true);
wait_scrub_stripe_io(stripe);
scrub_verify_one_stripe(stripe, old_error_bitmap); if (scrub_bitmap_empty_error(stripe)) goto out;
}
out:
error = scrub_bitmap_read_error(stripe); /* * Submit the repaired sectors. For zoned case, we cannot do repair * in-place, but queue the bg to be relocated.
*/
bitmap_andnot(&repaired, &errors.init_error_bitmap, &error,
stripe->nr_sectors); if (!sctx->readonly && !bitmap_empty(&repaired, stripe->nr_sectors)) { if (btrfs_is_zoned(fs_info)) {
btrfs_repair_one_zone(fs_info, sctx->stripes[0].bg->start);
} else {
scrub_write_sectors(sctx, stripe, repaired, false);
wait_scrub_stripe_io(stripe);
}
}
fill_writer_pointer_gap(sctx, stripe->physical + bio_off);
atomic_inc(&stripe->pending_io);
btrfs_submit_repair_write(bbio, stripe->mirror_num, dev_replace); if (!btrfs_is_zoned(fs_info)) return; /* * For zoned writeback, queue depth must be 1, thus we must wait for * the write to finish before the next write.
*/
wait_scrub_stripe_io(stripe);
/* * And also need to update the write pointer if write finished * successfully.
*/ if (!test_bit(bio_off >> fs_info->sectorsize_bits,
&stripe->write_error_bitmap))
sctx->write_pointer += bio_len;
}
/*
 * Submit the write bio(s) for the sectors specified by @write_bitmap.
 *
 * Here we utilize btrfs_submit_repair_write(), which has some extra benefits:
 *
 * - Only needs logical bytenr and mirror_num
 *   Just like the scrub read path
 *
 * - Would only result in writes to the specified mirror
 *   Unlike the regular writeback path, which would write back to all stripes
 *
 * - Handle dev-replace and read-repair writeback differently
 */
static void scrub_write_sectors(struct scrub_ctx *sctx, struct scrub_stripe *stripe,
				unsigned long write_bitmap, bool dev_replace)
{
	struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
	struct btrfs_bio *bbio = NULL;
	int sector_nr;

	for_each_set_bit(sector_nr, &write_bitmap, stripe->nr_sectors) {
		/* We should only writeback sectors covered by an extent. */
		ASSERT(scrub_bitmap_test_bit_has_extent(stripe, sector_nr));

		/* Cannot merge with previous sector, submit the current one. */
		if (bbio && sector_nr && !test_bit(sector_nr - 1, &write_bitmap)) {
			scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
			bbio = NULL;
		}
		if (!bbio) {
			bbio = btrfs_bio_alloc(stripe->nr_sectors, REQ_OP_WRITE,
					       fs_info, scrub_write_endio, stripe);
			bbio->bio.bi_iter.bi_sector = (stripe->logical +
						       (sector_nr << fs_info->sectorsize_bits)) >>
						      SECTOR_SHIFT;
		}
		scrub_bio_add_sector(bbio, stripe, sector_nr);
	}
	if (bbio)
		scrub_submit_write_bio(sctx, stripe, bbio, dev_replace);
}
/* * Throttling of IO submission, bandwidth-limit based, the timeslice is 1 * second. Limit can be set via /sys/fs/UUID/devinfo/devid/scrub_speed_max.
*/ staticvoid scrub_throttle_dev_io(struct scrub_ctx *sctx, struct btrfs_device *device, unsignedint bio_size)
{ constint time_slice = 1000;
s64 delta;
ktime_t now;
u32 div;
u64 bwlimit;
bwlimit = READ_ONCE(device->scrub_speed_max); if (bwlimit == 0) return;
/* * Slice is divided into intervals when the IO is submitted, adjust by * bwlimit and maximum of 64 intervals.
*/
div = clamp(bwlimit / (16 * 1024 * 1024), 1, 64);
/* Start new epoch, set deadline */
now = ktime_get(); if (sctx->throttle_deadline == 0) {
sctx->throttle_deadline = ktime_add_ms(now, time_slice / div);
sctx->throttle_sent = 0;
}
/* Still in the time to send? */ if (ktime_before(now, sctx->throttle_deadline)) { /* If current bio is within the limit, send it */
sctx->throttle_sent += bio_size; if (sctx->throttle_sent <= div_u64(bwlimit, div)) return;
/* We're over the limit, sleep until the rest of the slice */
delta = ktime_ms_delta(sctx->throttle_deadline, now);
} else { /* New request after deadline, start new epoch */
delta = 0;
}
/* Next call will start the deadline period */
sctx->throttle_deadline = 0;
}
/*
 * Given a physical address, this will calculate its
 * logical offset. If this is a parity stripe, it will return
 * the most left data stripe's logical offset.
 *
 * Return 0 if it is a data stripe, 1 means parity stripe.
 */
static int get_raid56_logic_offset(u64 physical, int num,
				   struct btrfs_chunk_map *map, u64 *offset,
				   u64 *stripe_start)
{
	int i;
	int j = 0;
	u64 last_offset;
	const int data_stripes = nr_data_stripes(map);

	/*
	 * NOTE(review): the loop header and the declarations of @rot,
	 * @stripe_nr, @stripe_index and the @last_offset computation are
	 * missing from this chunk.
	 */
		/* Work out the disk rotation on this stripe-set */
		rot = stripe_nr % map->num_stripes;
		/* calculate which stripe this data locates */
		rot += i;
		stripe_index = rot % map->num_stripes;
		if (stripe_index == num)
			return 0;
		if (stripe_index < num)
			j++;
	}
	*offset = last_offset + btrfs_stripe_nr_to_offset(j);
	return 1;
}
/*
 * Return 0 if the extent item range covers any byte of the range.
 * Return <0 if the extent item is before @search_start.
 * Return >0 if the extent item is after @search_start + @search_len.
 */
static int compare_extent_item_range(struct btrfs_path *path,
				     u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = path->nodes[0]->fs_info;
	u64 len;
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	ASSERT(key.type == BTRFS_EXTENT_ITEM_KEY ||
	       key.type == BTRFS_METADATA_ITEM_KEY);
	/* Metadata items store the level in the offset, not a length. */
	if (key.type == BTRFS_METADATA_ITEM_KEY)
		len = fs_info->nodesize;
	else
		len = key.offset;

	if (key.objectid + len <= search_start)
		return -1;
	if (key.objectid >= search_start + search_len)
		return 1;
	return 0;
}
/*
 * Locate one extent item which covers any byte in range
 * [@search_start, @search_start + @search_length)
 *
 * If the path is not initialized, we will initialize the search by doing
 * a btrfs_search_slot().
 * If the path is already initialized, we will use the path as the initial
 * slot, to avoid duplicated btrfs_search_slot() calls.
 *
 * NOTE: If an extent item starts before @search_start, we will still
 * return the extent item. This is for data extent crossing stripe boundary.
 *
 * Return 0 if we found such extent item, and @path will point to the extent item.
 * Return >0 if no such extent item can be found, and @path will be released.
 * Return <0 if hit fatal error, and @path will be released.
 */
static int find_first_extent_item(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  u64 search_start, u64 search_len)
{
	struct btrfs_fs_info *fs_info = extent_root->fs_info;
	struct btrfs_key key;
	int ret;

	/* Continue using the existing path */
	if (path->nodes[0])
		goto search_forward;

	/*
	 * Initialize the search key; without this @key would be passed to
	 * btrfs_search_slot() uninitialized.
	 */
	key.objectid = search_start;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	if (ret == 0) {
		/*
		 * Key with offset -1 found, there would have to exist an extent
		 * item with such offset, but this is out of the valid range.
		 */
		btrfs_release_path(path);
		return -EUCLEAN;
	}

	/*
	 * Here we intentionally pass 0 as @min_objectid, as there could be
	 * an extent item starting before @search_start.
	 */
	ret = btrfs_previous_extent_item(extent_root, path, 0);
	if (ret < 0)
		return ret;
	/*
	 * No matter whether we have found an extent item, the next loop will
	 * properly do every check on the key.
	 */
search_forward:
	while (true) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid >= search_start + search_len)
			break;
		if (key.type != BTRFS_METADATA_ITEM_KEY &&
		    key.type != BTRFS_EXTENT_ITEM_KEY)
			goto next;

		ret = compare_extent_item_range(path, search_start, search_len);
		if (ret == 0)
			return ret;
		if (ret > 0)
			break;
next:
		ret = btrfs_next_item(extent_root, path);
		if (ret) {
			/* Either no more items or a fatal error. */
			btrfs_release_path(path);
			return ret;
		}
	}
	btrfs_release_path(path);
	return 1;
}
/*
 * Locate one stripe which has at least one extent in its range.
 *
 * Return 0 if found such stripe, and store its info into @stripe.
 * Return >0 if there is no such stripe in the specified range.
 * Return <0 for error.
 */
static int scrub_find_fill_first_stripe(struct btrfs_block_group *bg,
					struct btrfs_path *extent_path,
					struct btrfs_path *csum_path,
					struct btrfs_device *dev, u64 physical,
					int mirror_num, u64 logical_start,
					u32 logical_len,
					struct scrub_stripe *stripe)
{
	struct btrfs_fs_info *fs_info = bg->fs_info;
	struct btrfs_root *extent_root = btrfs_extent_root(fs_info, bg->start);
	struct btrfs_root *csum_root = btrfs_csum_root(fs_info, bg->start);
	const u64 logical_end = logical_start + logical_len;
	u64 cur_logical = logical_start;
	u64 stripe_end;
	u64 extent_start;
	u64 extent_len;
	u64 extent_flags;
	u64 extent_gen;
	int ret;

	if (unlikely(!extent_root || !csum_root)) {
		btrfs_err(fs_info, "scrub: no valid extent or csum root found");
		return -EUCLEAN;
	}
	memset(stripe->sectors, 0, sizeof(struct scrub_sector_verification) *
				   stripe->nr_sectors);
	scrub_stripe_reset_bitmaps(stripe);

	/* The range must be inside the bg. */
	ASSERT(logical_start >= bg->start && logical_end <= bg->start + bg->length);

	ret = find_first_extent_item(extent_root, extent_path, logical_start,
				     logical_len);
	/* Either error or not found. */
	if (ret)
		goto out;
	get_extent_info(extent_path, &extent_start, &extent_len, &extent_flags,
			&extent_gen);
	if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		stripe->nr_meta_extents++;
	if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
		stripe->nr_data_extents++;
	cur_logical = max(extent_start, cur_logical);

	/*
	 * Round down to stripe boundary.
	 *
	 * The extra calculation against bg->start is to handle block groups
	 * whose logical bytenr is not BTRFS_STRIPE_LEN aligned.
	 */
	stripe->logical = round_down(cur_logical - bg->start, BTRFS_STRIPE_LEN) +
			  bg->start;
	stripe->physical = physical + stripe->logical - logical_start;
	stripe->dev = dev;
	stripe->bg = bg;
	stripe->mirror_num = mirror_num;
	stripe_end = stripe->logical + BTRFS_STRIPE_LEN - 1;

	/* Fill the first extent info into stripe->sectors[] array. */
	fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
			     extent_flags, extent_gen);
	cur_logical = extent_start + extent_len;

	/* Fill the extent info for the remaining sectors. */
	while (cur_logical <= stripe_end) {
		ret = find_first_extent_item(extent_root, extent_path, cur_logical,
					     stripe_end - cur_logical + 1);
		if (ret < 0)
			goto out;
		if (ret > 0) {
			ret = 0;
			break;
		}
		get_extent_info(extent_path, &extent_start, &extent_len,
				&extent_flags, &extent_gen);
		if (extent_flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
			stripe->nr_meta_extents++;
		if (extent_flags & BTRFS_EXTENT_FLAG_DATA)
			stripe->nr_data_extents++;
		fill_one_extent_info(fs_info, stripe, extent_start, extent_len,
				     extent_flags, extent_gen);
		cur_logical = extent_start + extent_len;
	}

	/* Now fill the data csum. */
	if (bg->flags & BTRFS_BLOCK_GROUP_DATA) {
		int sector_nr;
		unsigned long csum_bitmap = 0;

		/* Csum space should have already been allocated. */
		ASSERT(stripe->csums);

		/*
		 * Our csum bitmap should be large enough, as BTRFS_STRIPE_LEN
		 * should contain at most 16 sectors.
		 */
		ASSERT(BITS_PER_LONG >= BTRFS_STRIPE_LEN >> fs_info->sectorsize_bits);

		ret = btrfs_lookup_csums_bitmap(csum_root, csum_path,
						stripe->logical, stripe_end,
						stripe->csums, &csum_bitmap);
		if (ret < 0)
			goto out;
		if (ret > 0)
			ret = 0;
		/* NOTE(review): remainder of this function is truncated in this chunk. */
for_each_set_bit(i, &has_extent, stripe->nr_sectors) { /* We're beyond the chunk boundary, no need to read anymore. */ if (i >= nr_sectors) break;
/* The current sector cannot be merged, submit the bio. */ if (bbio &&
((i > 0 && !test_bit(i - 1, &has_extent)) ||
bbio->bio.bi_iter.bi_size >= stripe_len)) {
ASSERT(bbio->bio.bi_iter.bi_size);
atomic_inc(&stripe->pending_io);
btrfs_submit_bbio(bbio, mirror);
bbio = NULL;
}
if (!bbio) { struct btrfs_io_stripe io_stripe = {}; struct btrfs_io_context *bioc = NULL; const u64 logical = stripe->logical +
(i << fs_info->sectorsize_bits); int ret;
io_stripe.rst_search_commit_root = true;
stripe_len = (nr_sectors - i) << fs_info->sectorsize_bits; /* * For RST cases, we need to manually split the bbio to * follow the RST boundary.
*/
ret = btrfs_map_block(fs_info, BTRFS_MAP_READ, logical,
&stripe_len, &bioc, &io_stripe, &mirror);
btrfs_put_bioc(bioc); if (ret < 0) { if (ret != -ENODATA) { /* * Earlier btrfs_get_raid_extent_offset() * returned -ENODATA, which means there's * no entry for the corresponding range * in the stripe tree. But if it's in * the extent tree, then it's a preallocated * extent and not an error.
*/
scrub_bitmap_set_bit_io_error(stripe, i);
scrub_bitmap_set_bit_error(stripe, i);
} continue;
}
bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT; /* Read the whole range inside the chunk boundary. */ for (unsignedint cur = 0; cur < nr_sectors; cur++)
scrub_bio_add_sector(bbio, stripe, cur);
atomic_inc(&stripe->pending_io);
/* * For dev-replace, either user asks to avoid the source dev, or * the device is missing, we try the next mirror instead.
*/ if (sctx->is_dev_replace &&
(fs_info->dev_replace.cont_reading_from_srcdev_mode ==
BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID ||
!stripe->dev->bdev)) { int num_copies = btrfs_num_copies(fs_info, stripe->bg->start,
stripe->bg->length);
scrub_throttle_dev_io(sctx, sctx->stripes[0].dev,
btrfs_stripe_nr_to_offset(nr_stripes));
blk_start_plug(&plug); for (int i = 0; i < nr_stripes; i++) { struct scrub_stripe *stripe = &sctx->stripes[first_slot + i];
/* Those stripes should be initialized. */
ASSERT(test_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state));
scrub_submit_initial_read(sctx, stripe);
}
blk_finish_plug(&plug);
}
/* Submit the stripes which are populated but not submitted. */ if (nr_stripes % SCRUB_STRIPES_PER_GROUP) { constint first_slot = round_down(nr_stripes, SCRUB_STRIPES_PER_GROUP);
/* Submit for dev-replace. */ if (sctx->is_dev_replace) { /* * For dev-replace, if we know there is something wrong with * metadata, we should immediately abort.
*/ for (int i = 0; i < nr_stripes; i++) { if (stripe_has_metadata_error(&sctx->stripes[i])) {
ret = -EIO; goto out;
}
} for (int i = 0; i < nr_stripes; i++) { unsignedlong good; unsignedlong has_extent; unsignedlong error;
/* * There should always be one slot left, as caller filling the last * slot should flush them all.
*/
ASSERT(sctx->cur_stripe < SCRUB_TOTAL_STRIPES);
/* @found_logical_ret must be specified. */
ASSERT(found_logical_ret);
stripe = &sctx->stripes[sctx->cur_stripe];
scrub_reset_stripe(stripe);
ret = scrub_find_fill_first_stripe(bg, &sctx->extent_path,
&sctx->csum_path, dev, physical,
mirror_num, logical, length, stripe); /* Either >0 as no more extents or <0 for error. */ if (ret) return ret;
*found_logical_ret = stripe->logical;
sctx->cur_stripe++;
/* We filled one group, submit it. */ if (sctx->cur_stripe % SCRUB_STRIPES_PER_GROUP == 0) { constint first_slot = sctx->cur_stripe - SCRUB_STRIPES_PER_GROUP;
/* * For data stripe search, we cannot reuse the same extent/csum paths, * as the data stripe bytenr may be smaller than previous extent. Thus * we have to use our own extent/csum paths.
*/
extent_path.search_commit_root = 1;
extent_path.skip_locking = 1;
csum_path.search_commit_root = 1;
csum_path.skip_locking = 1;
for (int i = 0; i < data_stripes; i++) { int stripe_index; int rot;
u64 physical;
scrub_reset_stripe(stripe);
set_bit(SCRUB_STRIPE_FLAG_NO_REPORT, &stripe->state);
ret = scrub_find_fill_first_stripe(bg, &extent_path, &csum_path,
map->stripes[stripe_index].dev, physical, 1,
full_stripe_start + btrfs_stripe_nr_to_offset(i),
BTRFS_STRIPE_LEN, stripe); if (ret < 0) goto out; /* * No extent in this data stripe, need to manually mark them * initialized to make later read submission happy.
*/ if (ret > 0) {
stripe->logical = full_stripe_start +
btrfs_stripe_nr_to_offset(i);
stripe->dev = map->stripes[stripe_index].dev;
stripe->mirror_num = 1;
set_bit(SCRUB_STRIPE_FLAG_INITIALIZED, &stripe->state);
}
}
/* Check if all data stripes are empty. */ for (int i = 0; i < data_stripes; i++) {
stripe = &sctx->raid56_data_stripes[i]; if (!scrub_bitmap_empty_has_extent(stripe)) {
all_empty = false; break;
}
} if (all_empty) {
ret = 0; goto out;
}
for (int i = 0; i < data_stripes; i++) {
stripe = &sctx->raid56_data_stripes[i];
scrub_submit_initial_read(sctx, stripe);
} for (int i = 0; i < data_stripes; i++) {
stripe = &sctx->raid56_data_stripes[i];
wait_event(stripe->repair_wait,
test_bit(SCRUB_STRIPE_FLAG_REPAIR_DONE, &stripe->state));
} /* For now, no zoned support for RAID56. */
ASSERT(!btrfs_is_zoned(sctx->fs_info));
/* * Now all data stripes are properly verified. Check if we have any * unrepaired, if so abort immediately or we could further corrupt the * P/Q stripes. * * During the loop, also populate extent_bitmap.
*/ for (int i = 0; i < data_stripes; i++) { unsignedlong error; unsignedlong has_extent;
/* * We should only check the errors where there is an extent. * As we may hit an empty data stripe while it's missing.
*/
bitmap_and(&error, &error, &has_extent, stripe->nr_sectors); if (!bitmap_empty(&error, stripe->nr_sectors)) {
btrfs_err(fs_info, "scrub: unrepaired sectors detected, full stripe %llu data stripe %u errors %*pbl",
full_stripe_start, i, stripe->nr_sectors,
&error);
ret = -EIO; goto out;
}
bitmap_or(&extent_bitmap, &extent_bitmap, &has_extent,
stripe->nr_sectors);
}
/* Now we can check and regenerate the P/Q stripe. */
bio = bio_alloc(NULL, 1, REQ_OP_READ, GFP_NOFS);
bio->bi_iter.bi_sector = full_stripe_start >> SECTOR_SHIFT;
bio->bi_private = &io_done;
bio->bi_end_io = raid56_scrub_wait_endio;
/*
 * NOTE(review): removed unrelated German website boilerplate ("Die
 * Informationen auf dieser Webseite...") that was accidentally appended
 * to this file; it is not C code and would break compilation.
 */