/*
 * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
 * can be used as index to access btrfs_raid_array[].
 */
enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
{
	const u64 profile = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;

	/* No profile bit set means the plain SINGLE allocation profile. */
	return profile ? BTRFS_BG_FLAG_TO_INDEX(profile) : BTRFS_RAID_SINGLE;
}
/*
 * Return the human readable name of the RAID profile encoded in @flags, or
 * NULL when the flags do not map to a valid entry of btrfs_raid_array[].
 */
const char *btrfs_bg_type_to_raid_name(u64 flags)
{
	/*
	 * Fix: the source had the fused tokens "constchar"/"constint", which
	 * do not compile; they must be "const char"/"const int".
	 */
	const int index = btrfs_bg_flags_to_raid_index(flags);

	if (index >= BTRFS_NR_RAID_TYPES)
		return NULL;

	return btrfs_raid_array[index].raid_name;
}
/* Number of parity stripes for the RAID profile encoded in @type. */
int btrfs_nr_parity_stripes(u64 type)
{
	return btrfs_raid_array[btrfs_bg_flags_to_raid_index(type)].nparity;
}
/*
 * Fill @buf with textual description of @bg_flags, no more than @size_buf
 * bytes including terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
	int i;
	int ret;
	char *bp = buf;
	u64 flags = bg_flags;
	u32 size_bp = size_buf;

	if (!flags)
		return;

	/*
	 * Append "<desc>|" for each flag present, clearing the bit from
	 * @flags so that any leftover unknown bits can be printed as hex
	 * afterwards. Bail out to the label on truncation or error.
	 */
#define DESCRIBE_FLAG(flag, desc)					\
	do {								\
		if (flags & (flag)) {					\
			ret = snprintf(bp, size_bp, "%s|", (desc));	\
			if (ret < 0 || ret >= size_bp)			\
				goto out_overflow;			\
			size_bp -= ret;					\
			bp += ret;					\
			flags &= ~(flag);				\
		}							\
	} while (0)

	DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
	for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
		DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
			      btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

	/* Whatever is still set is unknown, print it as a raw hex value. */
	if (flags) {
		ret = snprintf(bp, size_bp, "0x%llx|", flags);
		size_bp -= ret;
	}

	if (size_bp < size_buf)
		buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

	/*
	 * The text is trimmed, it's up to the caller to provide sufficiently
	 * large buffer
	 */
out_overflow:;
}
/* * Device locking * ============== * * There are several mutexes that protect manipulation of devices and low-level * structures like chunks but not block groups, extents or files * * uuid_mutex (global lock) * ------------------------ * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from * the SCAN_DEV ioctl registration or from mount either implicitly (the first * device) or requested by the device= mount option * * the mutex can be very coarse and can cover long-running operations * * protects: updates to fs_devices counters like missing devices, rw devices, * seeding, structure cloning, opening/closing devices at mount/umount time * * global::fs_devs - add, remove, updates to the global list * * does not protect: manipulation of the fs_devices::devices list in general * but in mount context it could be used to exclude list modifications by eg. * scan ioctl * * btrfs_device::name - renames (write side), read is RCU * * fs_devices::device_list_mutex (per-fs, with RCU) * ------------------------------------------------ * protects updates to fs_devices::devices, ie. adding and deleting * * simple list traversal with read-only actions can be done with RCU protection * * may be used to exclude some operations from running concurrently without any * modifications to the list (see write_all_supers) * * Is not required at mount and close times, because our device list is * protected by the uuid_mutex at that point. * * balance_mutex * ------------- * protects balance structures (status, state) and context accessed from * several places (internally, ioctl) * * chunk_mutex * ----------- * protects chunks, adding or removing during allocation, trim or when a new * device is added/removed. Additionally it also protects post_commit_list of * individual devices, since they can be added to the transaction's * post_commit_list only with chunk_mutex held. 
* * cleaner_mutex * ------------- * a big lock that is held by the cleaner thread and prevents running subvolume * cleaning together with relocation or delayed iputs * * * Lock nesting * ============ * * uuid_mutex * device_list_mutex * chunk_mutex * balance_mutex * * * Exclusive operations * ==================== * * Maintains the exclusivity of the following operations that apply to the * whole filesystem and cannot run in parallel. * * - Balance (*) * - Device add * - Device remove * - Device replace (*) * - Resize * * The device operations (as above) can be in one of the following states: * * - Running state * - Paused state * - Completed state * * Only device operations marked with (*) can go into the Paused state for the * following reasons: * * - ioctl (only Balance can be Paused through ioctl) * - filesystem remounted as read-only * - filesystem unmounted and mounted as read-only * - system power-cycle and filesystem mounted as read-only * - filesystem or device errors leading to forced read-only * * The status of exclusive operation is set and cleared atomically. * During the course of Paused state, fs_info::exclusive_operation remains set. * A device operation in Paused or Running state can be canceled or resumed * either by ioctl (Balance only) or when remounted as read-write. * The exclusive status is cleared when the device operation is canceled or * completed.
*/
/* * Allocate new btrfs_fs_devices structure identified by a fsid. * * @fsid: if not NULL, copy the UUID to fs_devices::fsid and to * fs_devices::metadata_fsid * * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR(). * The returned struct is not linked onto any lists and can be destroyed with * kfree() right away.
*/ staticstruct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid)
{ struct btrfs_fs_devices *fs_devs;
fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL); if (!fs_devs) return ERR_PTR(-ENOMEM);
if (fsid) {
memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
}
return fs_devs;
}
/* Free a btrfs_device and everything it owns (name, alloc state, zone info). */
static void btrfs_free_device(struct btrfs_device *device)
{
	/* Fix: the fused token "staticvoid" does not compile. */
	WARN_ON(!list_empty(&device->post_commit_list));
	/*
	 * No need to call kfree_rcu() nor do RCU lock/unlock, nothing is
	 * reading the device name.
	 */
	kfree(rcu_dereference_raw(device->name));
	btrfs_extent_io_tree_release(&device->alloc_state);
	btrfs_destroy_dev_zone_info(device);
	kfree(device);
}
if (IS_ERR(*bdev_file)) {
ret = PTR_ERR(*bdev_file);
btrfs_err(NULL, "failed to open device for path %s with flags 0x%x: %d",
device_path, flags, ret); goto error;
}
bdev = file_bdev(*bdev_file);
if (flush)
sync_blockdev(bdev); if (holder) {
ret = set_blocksize(*bdev_file, BTRFS_BDEV_BLOCKSIZE); if (ret) {
bdev_fput(*bdev_file); goto error;
}
}
invalidate_bdev(bdev);
*disk_super = btrfs_read_disk_super(bdev, 0, false); if (IS_ERR(*disk_super)) {
ret = PTR_ERR(*disk_super);
bdev_fput(*bdev_file); goto error;
}
/* * Search and remove all stale devices (which are not mounted). When both * inputs are NULL, it will search and release all stale devices. * * @devt: Optional. When provided will it release all unmounted devices * matching this devt only. * @skip_device: Optional. Will skip this device when searching for the stale * devices. * * Return: 0 for success or if @devt is 0. * -EBUSY if @devt is a mounted device. * -ENOENT if @devt does not match any device in the list.
*/ staticint btrfs_free_stale_devices(dev_t devt, struct btrfs_device *skip_device)
{ struct btrfs_fs_devices *fs_devices, *tmp_fs_devices; struct btrfs_device *device, *tmp_device; int ret; bool freed = false;
lockdep_assert_held(&uuid_mutex);
/* Return good status if there is no instance of devt. */
ret = 0;
list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
mutex_lock(&fs_devices->device_list_mutex);
list_for_each_entry_safe(device, tmp_device,
&fs_devices->devices, dev_list) { if (skip_device && skip_device == device) continue; if (devt && devt != device->devt) continue; if (fs_devices->opened || fs_devices->holding) { if (devt)
ret = -EBUSY; break;
}
/* delete the stale device */
fs_devices->num_devices--;
list_del(&device->dev_list);
btrfs_free_device(device);
/* Find the fs_device by the usual method, if found use it. */
fsid_fs_devices = find_fsid(disk_super->fsid,
has_metadata_uuid ? disk_super->metadata_uuid : NULL);
/* The temp_fsid feature is supported only with single device filesystem. */ if (btrfs_super_num_devices(disk_super) != 1) return fsid_fs_devices;
/* * A seed device is an integral component of the sprout device, which * functions as a multi-device filesystem. So, temp-fsid feature is * not supported.
*/ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) return fsid_fs_devices;
/* Try to find a fs_devices by matching devt. */
list_for_each_entry(devt_fs_devices, &fs_uuids, fs_list) { struct btrfs_device *device;
list_for_each_entry(device, &devt_fs_devices->devices, dev_list) { if (device->devt == devt) {
found_by_devt = true; break;
}
} if (found_by_devt) break;
}
if (found_by_devt) { /* Existing device. */ if (fsid_fs_devices == NULL) { if (devt_fs_devices->opened == 0) { /* Stale device. */ return NULL;
} else { /* temp_fsid is mounting a subvol. */ return devt_fs_devices;
}
} else { /* Regular or temp_fsid device mounting a subvol. */ return devt_fs_devices;
}
} else { /* New device. */ if (fsid_fs_devices == NULL) { return NULL;
} else { /* sb::fsid is already used create a new temp_fsid. */
*same_fsid_diff_dev = true; return NULL;
}
}
/* Not reached. */
}
/* * This is only used on mount, and we are protected from competing things * messing with our fs_devices by the uuid_mutex, thus we do not need the * fs_devices->device_list_mutex here.
*/ staticint btrfs_open_one_device(struct btrfs_fs_devices *fs_devices, struct btrfs_device *device, blk_mode_t flags, void *holder)
{ struct file *bdev_file; struct btrfs_super_block *disk_super;
u64 devid; int ret;
if (device->bdev) return -EINVAL; if (!device->name) return -EINVAL;
ret = btrfs_get_bdev_and_sb(rcu_dereference_raw(device->name), flags, holder, 1,
&bdev_file, &disk_super); if (ret) return ret;
devid = btrfs_stack_device_id(&disk_super->dev_item); if (devid != device->devid) goto error_free_page;
if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE)) goto error_free_page;
/* * When FS is already mounted. * 1. If you are here and if the device->name is NULL that * means this device was missing at time of FS mount. * 2. If you are here and if the device->name is different * from 'path' that means either * a. The same device disappeared and reappeared with * different name. or * b. The missing-disk-which-was-replaced, has * reappeared now. * * We must allow 1 and 2a above. But 2b would be a spurious * and unintentional. * * Further in case of 1 and 2a above, the disk at 'path' * would have missed some transaction when it was away and * in case of 2a the stale bdev has to be updated as well. * 2b must not be allowed at all time.
*/
/* * For now, we do allow update to btrfs_fs_device through the * btrfs dev scan cli after FS has been mounted. We're still * tracking a problem where systems fail mount by subvolume id * when we reject replacement on a mounted FS.
*/ if (!fs_devices->opened && found_transid < device->generation) { /* * That is if the FS is _not_ mounted and if you * are here, that means there is more than one * disk with same uuid and devid.We keep the one * with larger generation number or the last-in if * generation are equal.
*/
mutex_unlock(&fs_devices->device_list_mutex);
btrfs_err(NULL, "device %s already registered with a higher generation, found %llu expect %llu",
path, found_transid, device->generation); return ERR_PTR(-EEXIST);
}
/* * We are going to replace the device path for a given devid, * make sure it's the same device if the device is mounted * * NOTE: the device->fs_info may not be reliable here so pass * in a NULL to message helpers instead. This avoids a possible * use-after-free when the fs_info and fs_info->sb are already * torn down.
*/ if (device->bdev) { if (device->devt != path_devt) {
mutex_unlock(&fs_devices->device_list_mutex);
btrfs_warn(NULL, "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
path, devid, found_transid,
current->comm,
task_pid_nr(current)); return ERR_PTR(-EEXIST);
}
btrfs_info(NULL, "devid %llu device path %s changed to %s scanned by %s (%d)",
devid, btrfs_dev_name(device),
path, current->comm,
task_pid_nr(current));
}
name = kstrdup(path, GFP_NOFS); if (!name) {
mutex_unlock(&fs_devices->device_list_mutex); return ERR_PTR(-ENOMEM);
}
rcu_read_lock();
old_name = rcu_dereference(device->name);
rcu_read_unlock();
rcu_assign_pointer(device->name, name);
kfree_rcu_mightsleep(old_name);
/* * Unmount does not free the btrfs_device struct but would zero * generation along with most of the other members. So just update * it back. We need it to pick the disk with largest generation * (as above).
*/ if (!fs_devices->opened) {
device->generation = found_transid;
fs_devices->latest_generation = max_t(u64, found_transid,
fs_devices->latest_generation);
}
/* * This is ok to do without RCU read locked because we hold the * uuid mutex so nothing we touch in here is going to disappear.
*/ if (orig_dev->name)
dev_path = rcu_dereference_raw(orig_dev->name);
device = btrfs_alloc_device(NULL, &orig_dev->devid,
orig_dev->uuid, dev_path); if (IS_ERR(device)) {
ret = PTR_ERR(device); goto error;
}
if (orig_dev->zone_info) { struct btrfs_zoned_device_info *zone_info;
zone_info = btrfs_clone_dev_zone_info(orig_dev); if (!zone_info) {
btrfs_free_device(device);
ret = -ENOMEM; goto error;
}
device->zone_info = zone_info;
}
/* This is the initialized path, it is safe to release the devices. */
list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) { if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
&device->dev_state) &&
!test_bit(BTRFS_DEV_STATE_MISSING,
&device->dev_state) &&
(!*latest_dev ||
device->generation > (*latest_dev)->generation)) {
*latest_dev = device;
} continue;
}
/* * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID, * in btrfs_init_dev_replace() so just continue.
*/ if (device->devid == BTRFS_DEV_REPLACE_DEVID) continue;
/* * After we have read the system tree and know devids belonging to this * filesystem, remove the device which does not belong there.
*/ void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
{ struct btrfs_device *latest_dev = NULL; struct btrfs_fs_devices *seed_dev;
/* * Reset the flush error record. We might have a transient flush error * in this mount, and if so we aborted the current transaction and set * the fs to an error state, guaranteeing no super blocks can be further * committed. However that error might be transient and if we unmount the * filesystem and mount it again, we should allow the mount to succeed * (btrfs_check_rw_degradable() should not fail) - if after mounting the * filesystem again we still get flush errors, then we will again abort * any transaction and set the error state, guaranteeing no commits of * unsafe super blocks.
*/
device->last_flush_error = 0;
/* Verify the device is back in a pristine state */
WARN_ON(test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
WARN_ON(!list_empty(&device->dev_alloc_list));
WARN_ON(!list_empty(&device->post_commit_list));
}
mutex_lock(&uuid_mutex);
close_fs_devices(fs_devices); if (!fs_devices->opened && !fs_devices->holding) {
list_splice_init(&fs_devices->seed_list, &list);
/* * If the struct btrfs_fs_devices is not assembled with any * other device, it can be re-initialized during the next mount * without the needing device-scan step. Therefore, it can be * fully freed.
*/ if (fs_devices->num_devices == 1) {
list_del(&fs_devices->fs_list);
free_fs_devices(fs_devices);
}
}
/*
 * Open all devices of @fs_devices, or just bump the open count when they are
 * already open. Returns 0 on success or a negative errno.
 */
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
		       blk_mode_t flags, void *holder)
{
	int ret;

	lockdep_assert_held(&uuid_mutex);
	/*
	 * The device_list_mutex cannot be taken here in case opening the
	 * underlying device takes further locks like open_mutex.
	 *
	 * We also don't need the lock here as this is called during mount and
	 * exclusion is provided by uuid_mutex
	 */
	if (fs_devices->opened) {
		fs_devices->opened++;
		ret = 0;
	} else {
		list_sort(NULL, &fs_devices->devices, devid_cmp);
		ret = open_fs_devices(fs_devices, flags, holder);
	}

	/*
	 * Fix: the source was truncated before the final return, which left
	 * a non-void function without a return value (undefined behavior).
	 */
	return ret;
}
bytenr_orig = btrfs_sb_offset(copy_num);
ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr); if (ret < 0) { if (ret == -ENOENT)
ret = -EINVAL; return ERR_PTR(ret);
}
if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev)) return ERR_PTR(-EINVAL);
if (drop_cache) { /* This should only be called with the primary sb. */
ASSERT(copy_num == 0);
/* * Drop the page of the primary superblock, so later read will * always read from the device.
*/
invalidate_inode_pages2_range(mapping, bytenr >> PAGE_SHIFT,
(bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT);
}
super = page_address(page); if (btrfs_super_magic(super) != BTRFS_MAGIC ||
btrfs_super_bytenr(super) != bytenr_orig) {
btrfs_release_disk_super(super); return ERR_PTR(-EINVAL);
}
/* * Make sure the last byte of label is properly NUL termiated. We use * '%s' to print the label, if not properly NUL termiated we can access * beyond the label.
*/ if (super->label[0] && super->label[BTRFS_LABEL_SIZE - 1])
super->label[BTRFS_LABEL_SIZE - 1] = 0;
return super;
}
/*
 * Unregister stale (unmounted) devices matching @devt, or all stale devices
 * when @devt is 0. Returns 0 on success or a negative errno from the free.
 */
int btrfs_forget_devices(dev_t devt)
{
	int ret;

	mutex_lock(&uuid_mutex);
	ret = btrfs_free_stale_devices(devt, NULL);
	mutex_unlock(&uuid_mutex);

	/*
	 * Fix: the source was truncated before the final return, which left
	 * a non-void function without a return value (undefined behavior).
	 */
	return ret;
}
/* * Do not skip device registration for mounted devices with matching * maj:min but different paths. Booting without initrd relies on * /dev/root initially, later replaced with the actual root device. * A successful scan ensures grub2-probe selects the correct device.
*/
list_for_each_entry(fs_devices, &fs_uuids, fs_list) { struct btrfs_device *device;
mutex_lock(&fs_devices->device_list_mutex);
if (!fs_devices->opened) {
mutex_unlock(&fs_devices->device_list_mutex); continue;
}
/* * Look for a btrfs signature on a device. This may be called out of the mount path * and we are not allowed to call set_blocksize during the scan. The superblock * is read via pagecache. * * With @mount_arg_dev it's a scan during mount time that will always register * the device or return an error. Multi-device and seeding devices are registered * in both cases.
*/ struct btrfs_device *btrfs_scan_one_device(constchar *path, bool mount_arg_dev)
{ struct btrfs_super_block *disk_super; bool new_device_added = false; struct btrfs_device *device = NULL; struct file *bdev_file;
dev_t devt;
lockdep_assert_held(&uuid_mutex);
/* * Avoid an exclusive open here, as the systemd-udev may initiate the * device scan which may race with the user's mount or mkfs command, * resulting in failure. * Since the device scan is solely for reading purposes, there is no * need for an exclusive open. Additionally, the devices are read again * during the mount process. It is ok to get some inconsistent * values temporarily, as the device paths of the fsid are the only * required information for assembling the volume.
*/
bdev_file = bdev_file_open_by_path(path, BLK_OPEN_READ, NULL, NULL); if (IS_ERR(bdev_file)) return ERR_CAST(bdev_file);
/* * Try to find a chunk that intersects [start, start + len] range and when one * such is found, record the end of it in *start
*/ staticbool contains_pending_extent(struct btrfs_device *device, u64 *start,
u64 len)
{
u64 physical_start, physical_end;
/* Lowest physical offset at which a new dev extent may be allocated. */
static u64 dev_extent_search_start(struct btrfs_device *device)
{
	switch (device->fs_devices->chunk_alloc_policy) {
	case BTRFS_CHUNK_ALLOC_ZONED:
		/*
		 * We don't care about the starting region like regular
		 * allocator, because we anyway use/reserve the first two zones
		 * for superblock logging.
		 */
		return 0;
	case BTRFS_CHUNK_ALLOC_REGULAR:
		return BTRFS_DEVICE_RANGE_RESERVED;
	default:
		/* Unknown policy: warn, then behave like the regular one. */
		btrfs_warn_unknown_chunk_allocation(device->fs_devices->chunk_alloc_policy);
		return BTRFS_DEVICE_RANGE_RESERVED;
	}
}
/* * Check if specified hole is suitable for allocation. * * @device: the device which we have the hole * @hole_start: starting position of the hole * @hole_size: the size of the hole * @num_bytes: the size of the free space that we need * * This function may modify @hole_start and @hole_size to reflect the suitable * position for allocation. Returns 1 if hole position is updated, 0 otherwise.
*/ staticbool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
u64 *hole_size, u64 num_bytes)
{ bool changed = false;
u64 hole_end = *hole_start + *hole_size;
for (;;) { /* * Check before we set max_hole_start, otherwise we could end up * sending back this offset anyway.
*/ if (contains_pending_extent(device, hole_start, *hole_size)) { if (hole_end >= *hole_start)
*hole_size = hole_end - *hole_start; else
*hole_size = 0;
changed = true;
}
switch (device->fs_devices->chunk_alloc_policy) { default:
btrfs_warn_unknown_chunk_allocation(device->fs_devices->chunk_alloc_policy);
fallthrough; case BTRFS_CHUNK_ALLOC_REGULAR: /* No extra check */ break; case BTRFS_CHUNK_ALLOC_ZONED: if (dev_extent_hole_check_zoned(device, hole_start,
hole_size, num_bytes)) {
changed = true; /* * The changed hole can contain pending extent. * Loop again to check that.
*/ continue;
} break;
}
break;
}
return changed;
}
/* * Find free space in the specified device. * * @device: the device which we search the free space in * @num_bytes: the size of the free space that we need * @search_start: the position from which to begin the search * @start: store the start of the free space. * @len: the size of the free space. that we find, or the size * of the max free space if we don't find suitable free space * * This does a pretty simple search, the expectation is that it is called very * infrequently and that a given device has a small number of extents. * * @start is used to store the start of the free space if we find. But if we * don't find suitable free space, it will be used to store the start position * of the max free space. * * @len is used to store the size of the free space that we find. * But if we don't find suitable free space, it is used to store the size of * the max free space. * * NOTE: This function will search *commit* root of device tree, and does extra * check to ensure dev extents are not double allocated. * This makes the function safe to allocate dev extents but may not report * correct usable device space, as device extent freed in current transaction * is not reported as available.
*/ staticint find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
u64 *start, u64 *len)
{ struct btrfs_fs_info *fs_info = device->fs_info; struct btrfs_root *root = fs_info->dev_root; struct btrfs_key key; struct btrfs_dev_extent *dev_extent; struct btrfs_path *path;
u64 search_start;
u64 hole_size;
u64 max_hole_start;
u64 max_hole_size = 0;
u64 extent_end;
u64 search_end = device->total_bytes; int ret; int slot; struct extent_buffer *l;
/* * If this free space is greater than which we need, * it must be the max free space that we have found * until now, so max_hole_start must point to the start * of this free space and the length of this free space * is stored in max_hole_size. Thus, we return * max_hole_start and max_hole_size and go back to the * caller.
*/ if (hole_size >= num_bytes) {
ret = 0; goto out;
}
}
/* * At this point, search_start should be the end of * allocated dev extents, and when shrinking the device, * search_end may be smaller than search_start.
*/ if (search_end > search_start) {
hole_size = search_end - search_start; if (dev_extent_hole_check(device, &search_start, &hole_size,
num_bytes)) {
btrfs_release_path(path); goto again;
}
/* * the device information is stored in the chunk root * the btrfs_device struct should be fully filled in
*/ staticint btrfs_add_dev_item(struct btrfs_trans_handle *trans, struct btrfs_device *device)
{ int ret; struct btrfs_path *path; struct btrfs_dev_item *dev_item; struct extent_buffer *leaf; struct btrfs_key key; unsignedlong ptr;
path = btrfs_alloc_path(); if (!path) return -ENOMEM;
ret = 0;
out:
btrfs_free_path(path); return ret;
}
/* * Function to update ctime/mtime for a given device path. * Mainly used for ctime/mtime based probe like libblkid. * * We don't care about errors here, this is just to be kind to userspace.
*/ staticvoid update_dev_time(constchar *device_path)
{ struct path path; int ret;
ret = kern_path(device_path, LOOKUP_FOLLOW, &path); if (ret) return;
btrfs_reserve_chunk_metadata(trans, false);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
btrfs_trans_release_chunk_metadata(trans); if (ret) { if (ret > 0)
ret = -ENOENT; goto out;
}
ret = btrfs_del_item(trans, root, path);
out:
btrfs_free_path(path); return ret;
}
/* * Verify that @num_devices satisfies the RAID profile constraints in the whole * filesystem. It's up to the caller to adjust that number regarding eg. device * replace.
*/ staticint btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
u64 num_devices)
{
u64 all_avail; unsigned seq; int i;
do {
seq = read_seqbegin(&fs_info->profiles_lock);
/*
 * Helper function to check if the given device is part of s_bdev / latest_dev
 * and replace it with the provided or the next active device. In the context
 * where this function is called, there should always be another device (or
 * next_device) which is active.
 */
void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
					    struct btrfs_device *next_device)
{
	struct btrfs_fs_info *fs_info = device->fs_info;

	if (!next_device)
		next_device = btrfs_find_next_active_device(fs_info->fs_devices,
							    device);
	ASSERT(next_device);

	/* Retarget the superblock's bdev if it pointed at the leaving device. */
	if (fs_info->sb->s_bdev && fs_info->sb->s_bdev == device->bdev)
		fs_info->sb->s_bdev = next_device->bdev;

	/* Likewise for the cached latest device of the fs_devices. */
	if (fs_info->fs_devices->latest_dev->bdev == device->bdev)
		fs_info->fs_devices->latest_dev = next_device;
}
/* * Return btrfs_fs_devices::num_devices excluding the device that's being * currently replaced.
*/ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
{
u64 num_devices = fs_info->fs_devices->num_devices;
if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) {
btrfs_err(fs_info, "device remove not supported on extent tree v2 yet"); return -EINVAL;
}
/* * The device list in fs_devices is accessed without locks (neither * uuid_mutex nor device_list_mutex) as it won't change on a mounted * filesystem and another device rm cannot run.
*/
num_devices = btrfs_num_devices(fs_info);
ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1); if (ret) return ret;
device = btrfs_find_device(fs_info->fs_devices, args); if (!device) { if (args->missing)
ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; else
ret = -ENOENT; return ret;
}
if (btrfs_pinned_by_swapfile(fs_info, device)) {
btrfs_warn(fs_info, "cannot remove device %s (devid %llu) due to active swapfile",
btrfs_dev_name(device), device->devid); return -ETXTBSY;
}
if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) return BTRFS_ERROR_DEV_TGT_REPLACE;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
fs_info->fs_devices->rw_devices == 1) return BTRFS_ERROR_DEV_ONLY_WRITABLE;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
mutex_lock(&fs_info->chunk_mutex);
list_del_init(&device->dev_alloc_list);
device->fs_devices->rw_devices--;
mutex_unlock(&fs_info->chunk_mutex);
}
ret = btrfs_shrink_device(device, 0); if (ret) goto error_undo;
trans = btrfs_start_transaction(fs_info->chunk_root, 0); if (IS_ERR(trans)) {
ret = PTR_ERR(trans); goto error_undo;
}
ret = btrfs_rm_dev_item(trans, device); if (ret) { /* Any error in dev item removal is critical */
btrfs_crit(fs_info, "failed to remove device item for devid %llu: %d",
device->devid, ret);
btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans); return ret;
}
/* * the device list mutex makes sure that we don't change * the device list while someone else is writing out all * the device supers. Whoever is writing all supers, should * lock the device list mutex before getting the number of * devices in the super block (super_copy). Conversely, * whoever updates the number of devices in the super block * (super_copy) should hold the device list mutex.
*/
/* * In normal cases the cur_devices == fs_devices. But in case * of deleting a seed device, the cur_devices should point to * its own fs_devices listed under the fs_devices->seed_list.
*/
cur_devices = device->fs_devices;
mutex_lock(&fs_devices->device_list_mutex);
list_del_rcu(&device->dev_list);
cur_devices->num_devices--;
cur_devices->total_devices--; /* Update total_devices of the parent fs_devices if it's seed */ if (cur_devices != fs_devices)
fs_devices->total_devices--;
if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
cur_devices->missing_devices--;
/* * At this point, the device is zero sized and detached from the * devices list. All that's left is to zero out the old supers and * free the device. * * We cannot call btrfs_close_bdev() here because we're holding the sb * write lock, and bdev_fput() on the block device will pull in the * ->open_mutex on the block device and it's dependencies. Instead * just flush the device and let the caller do the final bdev_release.
*/ if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
btrfs_scratch_superblocks(fs_info, device); if (device->bdev) {
sync_blockdev(device->bdev);
invalidate_bdev(device->bdev);
}
}
/* * This can happen if cur_devices is the private seed devices list. We * cannot call close_fs_devices() here because it expects the uuid_mutex * to be held, but in fact we don't need that for the private * seed_devices, we can simply decrement cur_devices->opened and then * remove it from our list and free the fs_devices.
*/ if (cur_devices->num_devices == 0) {
list_del_init(&cur_devices->seed_list);
ASSERT(cur_devices->opened == 1, "opened=%d", cur_devices->opened);
cur_devices->opened--;
free_fs_devices(cur_devices);
}
/* * in case of fs with no seed, srcdev->fs_devices will point * to fs_devices of fs_info. However when the dev being replaced is * a seed dev it will point to the seed's local fs_devices. In short * srcdev will have its correct fs_devices in both the cases.
*/
fs_devices = srcdev->fs_devices;
list_del_rcu(&srcdev->dev_list);
list_del(&srcdev->dev_alloc_list);
fs_devices->num_devices--; if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
fs_devices->missing_devices--;
if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
fs_devices->rw_devices--;
/* if this is no devs we rather delete the fs_devices */ if (!fs_devices->num_devices) { /* * On a mounted FS, num_devices can't be zero unless it's a * seed. In case of a seed device being replaced, the replace
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.