// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid0.c : Multiple Devices driver for Linux
 * Copyright (C) 1994-96 Marc ZYNGIER
 * <zyngier@ufr-info-p7.ibp.fr> or <maz@gloups.fdn.fr>
 * Copyright (C) 1999, 2000 Ingo Molnar, Red Hat
 */
rdev_for_each(rdev2, mddev) {
pr_debug("md/raid0:%s: comparing %pg(%llu)" " with %pg(%llu)\n",
mdname(mddev),
rdev1->bdev,
(unsignedlonglong)rdev1->sectors,
rdev2->bdev,
(unsignedlonglong)rdev2->sectors); if (rdev2 == rdev1) {
pr_debug("md/raid0:%s: END\n",
mdname(mddev)); break;
} if (rdev2->sectors == rdev1->sectors) { /* * Not unique, don't count it as a new * group
*/
pr_debug("md/raid0:%s: EQUAL\n",
mdname(mddev));
c = 1; break;
}
pr_debug("md/raid0:%s: NOT EQUAL\n",
mdname(mddev));
} if (!c) {
pr_debug("md/raid0:%s: ==> UNIQUE\n",
mdname(mddev));
conf->nr_strip_zones++;
pr_debug("md/raid0:%s: %d zones\n",
mdname(mddev), conf->nr_strip_zones);
}
}
pr_debug("md/raid0:%s: FINAL %d zones\n",
mdname(mddev), conf->nr_strip_zones);
/* * now since we have the hard sector sizes, we can make sure * chunk size is a multiple of that sector size
*/ if ((mddev->chunk_sectors << 9) % blksize) {
pr_warn("md/raid0:%s: chunk_size of %d not multiple of block size %d\n",
mdname(mddev),
mddev->chunk_sectors << 9, blksize);
err = -EINVAL; goto abort;
}
/* The first zone must contain all devices, so here we check that * there is a proper alignment of slots to devices and find them all
*/
zone = &conf->strip_zone[0];
cnt = 0;
smallest = NULL;
dev = conf->devlist;
err = -EINVAL;
rdev_for_each(rdev1, mddev) { int j = rdev1->raid_disk;
if (mddev->level == 10) { /* taking over a raid10-n2 array */
j /= 2;
rdev1->new_raid_disk = j;
}
if (mddev->level == 1) { /* taiking over a raid1 array- * we have only one active disk
*/
j = 0;
rdev1->new_raid_disk = j;
}
if (j < 0) {
pr_warn("md/raid0:%s: remove inactive devices before converting to RAID0\n",
mdname(mddev)); goto abort;
} if (j >= mddev->raid_disks) {
pr_warn("md/raid0:%s: bad disk number %d - aborting!\n",
mdname(mddev), j); goto abort;
} if (dev[j]) {
pr_warn("md/raid0:%s: multiple devices for %d - aborting!\n",
mdname(mddev), j); goto abort;
}
dev[j] = rdev1;
if (!smallest || (rdev1->sectors < smallest->sectors))
smallest = rdev1;
cnt++;
} if (cnt != mddev->raid_disks) {
pr_warn("md/raid0:%s: too few disks (%d of %d) - aborting!\n",
mdname(mddev), cnt, mddev->raid_disks); goto abort;
}
zone->nb_dev = cnt;
zone->zone_end = smallest->sectors * cnt;
curr_zone_end = zone->zone_end;
/* now do the other zones */ for (i = 1; i < conf->nr_strip_zones; i++)
{ int j;
zone = conf->strip_zone + i;
dev = conf->devlist + i * mddev->raid_disks;
pr_debug("md/raid0:%s: zone %d\n", mdname(mddev), i);
zone->dev_start = smallest->sectors;
smallest = NULL;
c = 0;
if (conf->layout == RAID0_ORIG_LAYOUT) { for (i = 1; i < conf->nr_strip_zones; i++) {
sector_t first_sector = conf->strip_zone[i-1].zone_end;
sector_div(first_sector, mddev->chunk_sectors);
zone = conf->strip_zone + i; /* disk_shift is first disk index used in the zone */
zone->disk_shift = sector_div(first_sector,
zone->nb_dev);
}
}
/* Find the zone which holds a particular offset * Update *sectorp to be an offset in that zone
*/ staticstruct strip_zone *find_zone(struct r0conf *conf,
sector_t *sectorp)
{ int i; struct strip_zone *z = conf->strip_zone;
sector_t sector = *sectorp;
for (i = 0; i < conf->nr_strip_zones; i++) if (sector < z[i].zone_end) { if (i)
*sectorp = sector - z[i-1].zone_end; return z + i;
}
BUG();
}
/* * remaps the bio to the target device. we separate two flows. * power 2 flow and a general flow for the sake of performance
*/ staticstruct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
sector_t sector, sector_t *sector_offset)
{ unsignedint sect_in_chunk;
sector_t chunk; struct r0conf *conf = mddev->private; int raid_disks = conf->strip_zone[0].nb_dev; unsignedint chunk_sects = mddev->chunk_sectors;
if (is_power_of_2(chunk_sects)) { int chunksect_bits = ffz(~chunk_sects); /* find the sector offset inside the chunk */
sect_in_chunk = sector & (chunk_sects - 1);
sector >>= chunksect_bits; /* chunk in zone */
chunk = *sector_offset; /* quotient is the chunk in real device*/
sector_div(chunk, zone->nb_dev << chunksect_bits);
} else{
sect_in_chunk = sector_div(sector, chunk_sects);
chunk = *sector_offset;
sector_div(chunk, chunk_sects * zone->nb_dev);
} /* * position the bio over the real device * real sector = chunk in device + starting of zone * + the position in the chunk
*/
*sector_offset = (chunk * chunk_sects) + sect_in_chunk; return conf->devlist[(zone - conf->strip_zone)*raid_disks
+ sector_div(sector, zone->nb_dev)];
}
/*
 * Bring a raid0 array online: validate the configuration, build the
 * strip zones (unless a takeover already did) and set queue limits.
 */
static int raid0_run(struct mddev *mddev)
{
	struct r0conf *conf;
	int err;

	/* a chunk size is mandatory for raid0 */
	if (mddev->chunk_sectors == 0) {
		pr_warn("md/raid0:%s: chunk size must be set.\n", mdname(mddev));
		return -EINVAL;
	}
	if (md_check_no_bitmap(mddev))
		return -EINVAL;

	/* if private is not null, we are here after takeover */
	if (mddev->private == NULL) {
		err = create_strip_zones(mddev, &conf);
		if (err < 0)
			return err;
		mddev->private = conf;
	}
	conf = mddev->private;

	if (!mddev_is_dm(mddev)) {
		err = raid0_set_limits(mddev);
		if (err)
			return err;
	}

	pr_debug("md/raid0:%s: md_size is %llu sectors.\n",
		 mdname(mddev),
		 (unsigned long long)mddev->array_sectors);

	dump_zones(mddev);

	return md_integrity_register(mddev);
}
/*
 * Convert disk_index to the disk order in which it is read/written.
 * For example, if we have 4 disks, they are numbered 0,1,2,3. If we
 * write the disks starting at disk 3, then the read/write order would
 * be disk 3, then 0, then 1, and then disk 2 and we want map_disk_shift()
 * to map the disks as follows 0,1,2,3 => 1,2,3,0. So disk 0 would map
 * to 1, 1 to 2, 2 to 3, and 3 to 0. That way we can compare disks in
 * that 'output' space to understand the read/write disk ordering.
 */
static int map_disk_shift(int disk_index, int num_disks, int disk_shift)
{
	/* (x + n) % n is identical to x % n in C, so one wrap-around
	 * addition keeps the result equal to the classic form
	 * ((disk_index + num_disks - disk_shift) % num_disks). */
	int shifted = disk_index - disk_shift;

	return (shifted + num_disks) % num_disks;
}
if (mddev->degraded != 1) {
pr_warn("md/raid0:%s: raid5 must be degraded! Degraded disks: %d\n",
mdname(mddev),
mddev->degraded); return ERR_PTR(-EINVAL);
}
rdev_for_each(rdev, mddev) { /* check slot number for a disk */ if (rdev->raid_disk == mddev->raid_disks-1) {
pr_warn("md/raid0:%s: raid5 must have missing parity disk!\n",
mdname(mddev)); return ERR_PTR(-EINVAL);
}
rdev->sectors = mddev->dev_sectors;
}
/* Set new parameters */
mddev->new_level = 0;
mddev->new_layout = 0;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->raid_disks--;
mddev->delta_disks = -1; /* make sure it will be not marked as dirty */
mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
/* Check layout: * - far_copies must be 1 * - near_copies must be 2 * - disks number must be even * - all mirrors must be already degraded
*/ if (mddev->layout != ((1 << 8) + 2)) {
pr_warn("md/raid0:%s:: Raid0 cannot takeover layout: 0x%x\n",
mdname(mddev),
mddev->layout); return ERR_PTR(-EINVAL);
} if (mddev->raid_disks & 1) {
pr_warn("md/raid0:%s: Raid0 cannot takeover Raid10 with odd disk number.\n",
mdname(mddev)); return ERR_PTR(-EINVAL);
} if (mddev->degraded != (mddev->raid_disks>>1)) {
pr_warn("md/raid0:%s: All mirrors must be already degraded!\n",
mdname(mddev)); return ERR_PTR(-EINVAL);
}
/* Set new parameters */
mddev->new_level = 0;
mddev->new_layout = 0;
mddev->new_chunk_sectors = mddev->chunk_sectors;
mddev->delta_disks = - mddev->raid_disks / 2;
mddev->raid_disks += mddev->delta_disks;
mddev->degraded = 0; /* make sure it will be not marked as dirty */
mddev->resync_offset = MaxSector;
mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
static void *raid0_takeover(struct mddev *mddev)
{
	/*
	 * raid0 can take over:
	 *  raid4  - if all data disks are active.
	 *  raid5  - providing it is Raid4 layout and one disk is faulty
	 *  raid10 - assuming we have all necessary active disks
	 *  raid1  - with (N -1) mirror drives faulty
	 */
	if (mddev->bitmap) {
		pr_warn("md/raid0: %s: cannot takeover array with bitmap\n",
			mdname(mddev));
		return ERR_PTR(-EBUSY);
	}

	switch (mddev->level) {
	case 4:
		return raid0_takeover_raid45(mddev);
	case 5:
		if (mddev->layout == ALGORITHM_PARITY_N)
			return raid0_takeover_raid45(mddev);
		pr_warn("md/raid0:%s: Raid can only takeover Raid5 with layout: %d\n",
			mdname(mddev), ALGORITHM_PARITY_N);
		break;
	case 10:
		return raid0_takeover_raid10(mddev);
	case 1:
		return raid0_takeover_raid1(mddev);
	default:
		break;
	}

	pr_warn("Takeover from raid%i to raid0 not supported\n",
		mddev->level);

	return ERR_PTR(-EINVAL);
}
/* raid0 performs no resync/recovery, so there is nothing to quiesce. */
static void raid0_quiesce(struct mddev *mddev, int quiesce)
{
}
/*
 * NOTE(review): the text below is web-extraction residue, not kernel source.
 * Translated from German:
 * The information on this website has been compiled carefully and to the
 * best of our knowledge. However, neither the completeness, nor the
 * correctness, nor the quality of the information provided is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental.
 */