/*
 * Allocate and initialise the linear_conf for a linear md array with
 * @raid_disks member devices.
 *
 * NOTE(review): this block looks like two functions fused by a bad
 * extraction. "staticstruct" is missing a space, and the tail
 * (md_integrity_register() / mddev->private = NULL / "return ret")
 * matches linear_run(), not linear_conf() -- it returns the int 'ret'
 * from a function declared to return struct linear_conf *.  The real
 * linear_conf() body (rdev iteration filling conf->disks[], end_sector
 * offset calculation) appears to be missing between the conf->raid_disks
 * assignment and the integrity-register call.  Restore from the original
 * drivers/md/md-linear.c before relying on this code; 'rdev', 'cnt' and
 * 'i' are unused as shown.
 */
staticstruct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
{ struct linear_conf *conf; struct md_rdev *rdev; int ret = -EINVAL; int cnt; int i;
conf = kzalloc(struct_size(conf, disks, raid_disks), GFP_KERNEL); if (!conf) return ERR_PTR(-ENOMEM);
/*
 * conf->raid_disks is a copy of mddev->raid_disks.  The reason to keep a
 * copy of mddev->raid_disks in struct linear_conf is that
 * mddev->raid_disks may not be consistent with the number of pointers in
 * conf->disks[] when it is updated in linear_add() and used to iterate
 * the old conf->disks[] array in linear_congested().  Here
 * conf->raid_disks is always consistent with the number of pointers in
 * the conf->disks[] array, and mddev->private is updated with
 * rcu_assign_pointer() in linear_add(), so that race is avoided.
 */
conf->raid_disks = raid_disks;
ret = md_integrity_register(mddev); if (ret) {
kfree(conf);
mddev->private = NULL;
} return ret;
}
/*
 * linear_add() - hot-add a drive to grow a linear array.
 *
 * Adding a drive to a linear array allows the array to grow.  It is
 * permitted only if the new drive has a matching superblock already on
 * it, with raid_disk equal to raid_disks.  It is achieved by creating a
 * new linear_conf structure and swapping it in place of the current one;
 * the old one is freed via RCU once no reader can still see it, which
 * avoids races with lockless readers.
 *
 * Returns 0 on success, -EINVAL if @rdev's saved slot does not match, or
 * the PTR_ERR() from linear_conf() on allocation/setup failure.
 *
 * Fix vs. original: "staticint" was missing the space between the two
 * keywords, which does not compile.
 */
static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
{
	struct linear_conf *newconf, *oldconf;

	if (rdev->saved_raid_disk != mddev->raid_disks)
		return -EINVAL;

	newconf = linear_conf(mddev, mddev->raid_disks + 1);
	if (IS_ERR(newconf))
		return PTR_ERR(newconf);

	/*
	 * newconf->raid_disks already keeps a copy of the increased value
	 * of mddev->raid_disks; WARN_ONCE() just asserts that.  oldconf
	 * may still be referenced in linear_congested(), therefore
	 * kfree_rcu() is used so it is only freed once no one uses it
	 * anymore.
	 */
	oldconf = rcu_dereference_protected(mddev->private,
			lockdep_is_held(&mddev->reconfig_mutex));
	mddev->raid_disks++;
	WARN_ONCE(mddev->raid_disks != newconf->raid_disks,
		  "copied raid_disks doesn't match mddev->raid_disks");
	rcu_assign_pointer(mddev->private, newconf);
	md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
	kfree_rcu(oldconf, rcu);
	return 0;
}
if (unlikely(bio_sector >= end_sector ||
bio_sector < start_sector)) goto out_of_bounds;
if (unlikely(is_rdev_broken(tmp_dev->rdev))) {
md_error(mddev, tmp_dev->rdev);
bio_io_error(bio); returntrue;
}
if (unlikely(bio_end_sector(bio) > end_sector)) { /* This bio crosses a device boundary, so we have to split it */ struct bio *split = bio_split(bio, end_sector - bio_sector,
GFP_NOIO, &mddev->bio_set);
if (IS_ERR(split)) {
bio->bi_status = errno_to_blk_status(PTR_ERR(split));
bio_endio(bio); returntrue;
}
bio_chain(split, bio);
trace_block_split(split, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);
bio = split;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.