/*
 * NOTE(review): extraction has fused keywords in this region ("staticconst"
 * should read "static const"); restore the spacing when reconciling against
 * the pristine file.
 */
/** The maximum logical space is 4 petabytes, which is 1 terablock. */ staticconst block_count_t MAXIMUM_VDO_LOGICAL_BLOCKS = 1024ULL * 1024 * 1024 * 1024;
/** The maximum physical space is 256 terabytes, which is 64 gigablocks. */ staticconst block_count_t MAXIMUM_VDO_PHYSICAL_BLOCKS = 1024ULL * 1024 * 1024 * 64;
/* * The current version for the data encoded in the super block. This must be changed any time there * is a change to encoding of the component data of any VDO component.
*/ staticconststruct version_number VDO_COMPONENT_DATA_41_0 = {
.major_version = 41,
.minor_version = 0,
};
/*
 * NOTE(review): the ".size = ..." initializer below is an orphaned fragment.
 * The declaration it belongs to (presumably a fixed super block header
 * constant, given the mention of VDO_SUPER_BLOCK_FIXED_SIZE) was lost in
 * extraction; only its trailing field and closing brace are visible here.
 */
/* This is the minimum size, if the super block contains no components. */
.size = VDO_SUPER_BLOCK_FIXED_SIZE - VDO_ENCODED_HEADER_SIZE,
};
/**
 * validate_version() - Check whether a version matches an expected version.
 * @expected_version: The expected version.
 * @actual_version: The version being validated.
 * @component_name: The name of the component or the calling function (for error logging).
 *
 * Logs an error describing a mismatch.
 *
 * Return: VDO_SUCCESS if the versions are the same,
 *         VDO_UNSUPPORTED_VERSION if the versions don't match.
 */
static int __must_check validate_version(struct version_number expected_version,
					 struct version_number actual_version,
					 const char *component_name)
{
	/* Matching versions need no logging; report success immediately. */
	if (vdo_are_same_version(expected_version, actual_version))
		return VDO_SUCCESS;

	return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION,
				      "%s version mismatch, expected %d.%d, got %d.%d",
				      component_name,
				      expected_version.major_version,
				      expected_version.minor_version,
				      actual_version.major_version,
				      actual_version.minor_version);
}
/** * vdo_validate_header() - Check whether a header matches expectations. * @expected_header: The expected header. * @actual_header: The header being validated. * @exact_size: If true, the size fields of the two headers must be the same, otherwise it is * required that actual_header.size >= expected_header.size. * @name: The name of the component or the calling function (for error logging). * * Logs an error describing the first mismatch found. * * Return: VDO_SUCCESS if the header meets expectations, * VDO_INCORRECT_COMPONENT if the component ids don't match, * VDO_UNSUPPORTED_VERSION if the versions or sizes don't match.
*/ int vdo_validate_header(conststruct header *expected_header, conststruct header *actual_header, bool exact_size, constchar *name)
{ int result;
/* First mismatch wins: component ids are compared before versions. */
if (expected_header->id != actual_header->id) { return vdo_log_error_strerror(VDO_INCORRECT_COMPONENT, "%s ID mismatch, expected %d, got %d",
name, expected_header->id,
actual_header->id);
}
result = validate_version(expected_header->version, actual_header->version,
name); if (result != VDO_SUCCESS) return result;
/*
 * NOTE(review): truncated -- the doc comment above promises a size check
 * driven by @exact_size and a final return, but the visible body ends after
 * the version check. The remainder of this function was lost in extraction.
 */
/*
 * NOTE(review): garbled region. The declaration below is
 * decode_volume_geometry(), but from the vdo_decode_header() call onward the
 * statements reference `header`, `result`, `initial_offset`, `state`,
 * `flat_page_origin` and VDO_BLOCK_MAP_HEADER_2_0 -- none of which are
 * declared here, and a void function cannot `return result`. The body of a
 * block-map state decoder was fused onto this function during extraction; do
 * not compile as-is.
 */
/** * decode_volume_geometry() - Decode the on-disk representation of a volume geometry from a buffer. * @buffer: A buffer to decode from. * @offset: The offset in the buffer at which to decode. * @geometry: The structure to receive the decoded fields. * @version: The geometry block version to decode.
*/ staticvoid decode_volume_geometry(u8 *buffer, size_t *offset, struct volume_geometry *geometry, u32 version)
{
u32 unused, mem; enum volume_region_id id;
nonce_t nonce;
block_count_t bio_offset = 0; bool sparse;
/* This is for backwards compatibility. */
decode_u32_le(buffer, offset, &unused);
geometry->unused = unused;
/* NOTE(review): fusion with a different function's body appears to begin here. */
vdo_decode_header(buffer, offset, &header);
result = vdo_validate_header(&VDO_BLOCK_MAP_HEADER_2_0, &header, true, __func__); if (result != VDO_SUCCESS) return result;
initial_offset = *offset;
decode_u64_le(buffer, offset, &flat_page_origin);
result = VDO_ASSERT(flat_page_origin == VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN, "Flat page origin must be %u (recorded as %llu)",
VDO_BLOCK_MAP_FLAT_PAGE_ORIGIN,
(unsignedlonglong) state->flat_page_origin); if (result != VDO_SUCCESS) return result;
decode_u64_le(buffer, offset, &flat_page_count);
result = VDO_ASSERT(flat_page_count == 0, "Flat page count must be 0 (recorded as %llu)",
(unsignedlonglong) state->flat_page_count); if (result != VDO_SUCCESS) return result;
VDO_ASSERT_LOG_ONLY(VDO_BLOCK_MAP_HEADER_2_0.size == *offset - initial_offset, "encoded block map component size must match header size");
}
/*
 * NOTE(review): truncated and fused. The function below declares its locals
 * and then ends with a VDO_ASSERT_LOG_ONLY referencing `*offset` and
 * `initial_offset` -- variables that belong to a recovery-journal encoder,
 * not to this function. The real page-counting loop was lost in extraction.
 */
/** * vdo_compute_new_forest_pages() - Compute the number of pages which must be allocated at each * level in order to grow the forest to a new number of entries. * @entries: The new number of entries the block map must address. * * Return: The total number of non-leaf pages required.
*/
block_count_t vdo_compute_new_forest_pages(root_count_t root_count, struct boundary *old_sizes,
block_count_t entries, struct boundary *new_sizes)
{
page_count_t leaf_pages = max(vdo_compute_block_map_page_count(entries), 1U);
page_count_t level_size = DIV_ROUND_UP(leaf_pages, root_count);
block_count_t total_pages = 0;
height_t height;
/* NOTE(review): fragment of a different function fused in below. */
VDO_ASSERT_LOG_ONLY(VDO_RECOVERY_JOURNAL_HEADER_7_0.size == *offset - initial_offset, "encoded recovery journal component size must match header size");
}
/** * decode_recovery_journal_state_7_0() - Decode the state of a recovery journal saved in a buffer. * @buffer: The buffer containing the saved state. * @state: A pointer to a recovery journal state to hold the result of a successful decode. * * Return: VDO_SUCCESS or an error code.
*/ staticint __must_check decode_recovery_journal_state_7_0(u8 *buffer, size_t *offset, struct recovery_journal_state_7_0 *state)
{ struct header header; int result;
size_t initial_offset;
sequence_number_t journal_start;
block_count_t logical_blocks_used, block_map_data_blocks;
vdo_decode_header(buffer, offset, &header);
result = vdo_validate_header(&VDO_RECOVERY_JOURNAL_HEADER_7_0, &header, true,
__func__); if (result != VDO_SUCCESS) return result;
/*
 * NOTE(review): truncated -- the locals (journal_start,
 * logical_blocks_used, block_map_data_blocks) are declared but never
 * decoded or stored into *state, and there is no final return. The rest
 * of this function was lost in extraction.
 */
/**
 * vdo_get_journal_operation_name() - Get the name of a journal operation.
 * @operation: The operation to name.
 *
 * Return: The name of the operation.
 */
const char *vdo_get_journal_operation_name(enum journal_operation operation)
{
	if (operation == VDO_JOURNAL_DATA_REMAPPING)
		return "data remapping";

	if (operation == VDO_JOURNAL_BLOCK_MAP_REMAPPING)
		return "block map remapping";

	/* Any unrecognized value gets a generic name rather than NULL. */
	return "unknown journal operation";
}
/** * encode_slab_depot_state_2_0() - Encode the state of a slab depot into a buffer.
*/ staticvoid encode_slab_depot_state_2_0(u8 *buffer, size_t *offset, struct slab_depot_state_2_0 state)
{
size_t initial_offset;
/*
 * NOTE(review): truncated -- only the local declaration is visible; the
 * header encode and field encodes that should follow were lost in
 * extraction, and there is no closing brace for this function.
 */
/** * vdo_configure_slab_depot() - Configure the slab depot. * @partition: The slab depot partition * @slab_config: The configuration of a single slab. * @zone_count: The number of zones the depot will use. * @state: The state structure to be configured. * * Configures the slab_depot for the specified storage capacity, finding the number of data blocks * that will fit and still leave room for the depot metadata, then return the saved state for that * configuration. * * Return: VDO_SUCCESS or an error code.
*/ int vdo_configure_slab_depot(conststruct partition *partition, struct slab_config slab_config, zone_count_t zone_count, struct slab_depot_state_2_0 *state)
{
block_count_t total_slab_blocks, total_data_blocks;
size_t slab_count;
physical_block_number_t last_block;
block_count_t slab_size = slab_config.slab_blocks;
/* We do not allow runt slabs, so we waste up to a slab's worth. */
slab_count = (partition->count / slab_size); if (slab_count == 0) return VDO_NO_SPACE;
if (slab_count > MAX_VDO_SLABS) return VDO_TOO_MANY_SLABS;
/*
 * NOTE(review): truncated -- total_slab_blocks, total_data_blocks and
 * last_block are declared but never computed, *state is never filled in,
 * and there is no success return. The remainder was lost in extraction.
 */
/** * vdo_configure_slab() - Measure and initialize the configuration to use for each slab. * @slab_size: The number of blocks per slab. * @slab_journal_blocks: The number of blocks for the slab journal. * @slab_config: The slab configuration to initialize. * * Return: VDO_SUCCESS or an error code.
*/ int vdo_configure_slab(block_count_t slab_size, block_count_t slab_journal_blocks, struct slab_config *slab_config)
{
block_count_t ref_blocks, meta_blocks, data_blocks;
block_count_t flushing_threshold, remaining, blocking_threshold;
block_count_t minimal_extra_space, scrubbing_threshold;
/* The journal must leave room for at least one data/metadata block. */
if (slab_journal_blocks >= slab_size) return VDO_BAD_CONFIGURATION;
/* * This calculation should technically be a recurrence, but the total number of metadata * blocks is currently less than a single block of ref_counts, so we'd gain at most one * data block in each slab with more iteration.
*/
ref_blocks = vdo_get_saved_reference_count_size(slab_size - slab_journal_blocks);
meta_blocks = (ref_blocks + slab_journal_blocks);
/* Make sure configured slabs are not too small. */ if (meta_blocks >= slab_size) return VDO_BAD_CONFIGURATION;
data_blocks = slab_size - meta_blocks;
/* * Configure the slab journal thresholds. The flush threshold is 168 of 224 blocks in * production, or 3/4ths, so we use this ratio for all sizes.
*/
flushing_threshold = ((slab_journal_blocks * 3) + 3) / 4; /* * The blocking threshold should be far enough from the flushing threshold to not produce * delays, but far enough from the end of the journal to allow multiple successive recovery * failures.
*/
remaining = slab_journal_blocks - flushing_threshold;
blocking_threshold = flushing_threshold + ((remaining * 5) / 7); /* The scrubbing threshold should be at least 2048 entries before the end of the journal. */
minimal_extra_space = 1 + (MAXIMUM_VDO_USER_VIOS / VDO_SLAB_JOURNAL_FULL_ENTRIES_PER_BLOCK);
scrubbing_threshold = blocking_threshold; if (slab_journal_blocks > minimal_extra_space)
scrubbing_threshold = slab_journal_blocks - minimal_extra_space; if (blocking_threshold > scrubbing_threshold)
blocking_threshold = scrubbing_threshold;
/*
 * NOTE(review): truncated -- the computed values are never stored into
 * *slab_config and there is no VDO_SUCCESS return in the visible text;
 * the function's tail was lost in extraction.
 */
/** * allocate_partition() - Allocate a partition and add it to a layout. * @layout: The layout containing the partition. * @id: The id of the partition. * @offset: The offset into the layout at which the partition begins. * @size: The size of the partition in blocks. * * Return: VDO_SUCCESS or an error.
*/ staticint allocate_partition(struct layout *layout, u8 id,
physical_block_number_t offset, block_count_t size)
{ struct partition *partition; int result;
result = vdo_allocate(1, struct partition, __func__, &partition); if (result != VDO_SUCCESS) return result;
/*
 * NOTE(review): truncated -- the allocated partition's fields are never
 * initialized and it is never linked into layout->head; the rest of the
 * function was lost in extraction.
 */
/** * make_partition() - Create a new partition from the beginning or end of the unused space in a * layout. * @layout: The layout. * @id: The id of the partition to make. * @size: The number of blocks to carve out; if 0, all remaining space will be used. * @beginning: True if the partition should start at the beginning of the unused space. * * Return: A success or error code, particularly VDO_NO_SPACE if there are fewer than size blocks * remaining.
*/ staticint __must_check make_partition(struct layout *layout, enum partition_id id,
block_count_t size, bool beginning)
{ int result;
physical_block_number_t offset;
block_count_t free_blocks = layout->last_free - layout->first_free;
/*
 * NOTE(review): truncated -- the visible body stops after computing the
 * free-block count; the size/duplicate checks, offset computation, and
 * the allocate_partition() call were lost in extraction.
 */
/** * vdo_initialize_layout() - Lay out the partitions of a vdo. * @size: The entire size of the vdo. * @offset: The start of the layout on the underlying storage in blocks. * @block_map_blocks: The size of the block map partition. * @journal_blocks: The size of the journal partition. * @summary_blocks: The size of the slab summary partition. * @layout: The layout to initialize. * * Return: VDO_SUCCESS or an error.
*/ int vdo_initialize_layout(block_count_t size, physical_block_number_t offset,
block_count_t block_map_blocks, block_count_t journal_blocks,
block_count_t summary_blocks, struct layout *layout)
{ int result;
block_count_t necessary_size =
(offset + block_map_blocks + journal_blocks + summary_blocks);
if (necessary_size > size) return vdo_log_error_strerror(VDO_NO_SPACE, "Not enough space to make a VDO");
/*
 * NOTE(review): *layout is an output parameter, yet its fields
 * (first_free, last_free, head) are read by make_partition() below
 * without any visible initialization -- an initializer assignment for
 * *layout appears to have been lost in extraction; confirm against the
 * pristine file before relying on this text.
 */
result = make_partition(layout, VDO_BLOCK_MAP_PARTITION, block_map_blocks, true); if (result != VDO_SUCCESS) {
vdo_uninitialize_layout(layout); return result;
}
result = make_partition(layout, VDO_SLAB_SUMMARY_PARTITION, summary_blocks, false); if (result != VDO_SUCCESS) {
vdo_uninitialize_layout(layout); return result;
}
result = make_partition(layout, VDO_RECOVERY_JOURNAL_PARTITION, journal_blocks, false); if (result != VDO_SUCCESS) {
vdo_uninitialize_layout(layout); return result;
}
/* The slab depot gets all remaining space (size 0 means "the rest"). */
result = make_partition(layout, VDO_SLAB_DEPOT_PARTITION, 0, true); if (result != VDO_SUCCESS)
vdo_uninitialize_layout(layout);
return result;
}
/**
 * vdo_uninitialize_layout() - Clean up a layout.
 * @layout: The layout to clean up.
 *
 * All partitions created by this layout become invalid pointers.
 */
void vdo_uninitialize_layout(struct layout *layout)
{
	struct partition *part = layout->head;

	/* Walk the singly-linked partition list, freeing each node. */
	while (part != NULL) {
		struct partition *next = part->next;

		vdo_free(part);
		part = next;
	}

	/* Leave the layout in a recognizably empty state. */
	memset(layout, 0, sizeof(struct layout));
}
/**
 * vdo_get_partition() - Get a partition by id.
 * @layout: The layout from which to get a partition.
 * @id: The id of the partition.
 * @partition_ptr: A pointer to hold the partition.
 *
 * Return: VDO_SUCCESS or an error.
 */
int vdo_get_partition(struct layout *layout, enum partition_id id,
		      struct partition **partition_ptr)
{
	struct partition *candidate;

	for (candidate = layout->head; candidate != NULL; candidate = candidate->next) {
		if (candidate->id != id)
			continue;

		/* Found it; the out-parameter is optional. */
		if (partition_ptr != NULL)
			*partition_ptr = candidate;
		return VDO_SUCCESS;
	}

	return VDO_UNKNOWN_PARTITION;
}
/*
 * NOTE(review): garbled region containing fragments of at least three
 * functions. vdo_get_known_partition() below is missing its final
 * `return partition;`, and is immediately followed by fragments of a layout
 * encoder (the BUILD_BUG_ON and partition-count assert) and of a layout
 * decoder (header decode/validate, the tail of a partition-allocation loop,
 * and the required-partition validation loop, which references undeclared
 * `i`, `start` and `size`). Reconcile against the pristine file.
 */
/** * vdo_get_known_partition() - Get a partition by id from a validated layout. * @layout: The layout from which to get a partition. * @id: The id of the partition. * * Return: the partition
*/ struct partition *vdo_get_known_partition(struct layout *layout, enum partition_id id)
{ struct partition *partition; int result = vdo_get_partition(layout, id, &partition);
VDO_ASSERT_LOG_ONLY(result == VDO_SUCCESS, "layout has expected partition: %u", id);
/* NOTE(review): fusion with other functions' bodies appears to begin here. */
BUILD_BUG_ON(sizeof(enum partition_id) != sizeof(u8));
VDO_ASSERT_LOG_ONLY(layout->num_partitions <= U8_MAX, "layout partition count must fit in a byte");
vdo_decode_header(buffer, offset, &header); /* Layout is variable size, so only do a minimum size check here. */
result = vdo_validate_header(&VDO_LAYOUT_HEADER_3_0, &header, false, __func__); if (result != VDO_SUCCESS) return result;
result = VDO_ASSERT(sizeof(struct layout_3_0) == *offset - initial_offset, "decoded size of a layout header must match structure"); if (result != VDO_SUCCESS) return result;
result = allocate_partition(layout, id, partition_offset, count); if (result != VDO_SUCCESS) {
vdo_uninitialize_layout(layout); return result;
}
}
/* Validate that the layout has all (and only) the required partitions */ for (i = 0; i < VDO_PARTITION_COUNT; i++) {
result = vdo_get_partition(layout, REQUIRED_PARTITIONS[i], &partition); if (result != VDO_SUCCESS) {
vdo_uninitialize_layout(layout); return vdo_log_error_strerror(result, "layout is missing required partition %u",
REQUIRED_PARTITIONS[i]);
}
start += partition->count;
}
if (start != size) {
vdo_uninitialize_layout(layout); return vdo_log_error_strerror(UDS_BAD_STATE, "partitions do not cover the layout");
}
return VDO_SUCCESS;
}
/** * pack_vdo_config() - Convert a vdo_config to its packed on-disk representation. * @config: The vdo config to convert. * * Return: The platform-independent representation of the config.
*/ staticstruct packed_vdo_config pack_vdo_config(struct vdo_config config)
{ return (struct packed_vdo_config) {
.logical_blocks = __cpu_to_le64(config.logical_blocks),
.physical_blocks = __cpu_to_le64(config.physical_blocks),
.slab_size = __cpu_to_le64(config.slab_size),
.recovery_journal_size = __cpu_to_le64(config.recovery_journal_size),
.slab_journal_blocks = __cpu_to_le64(config.slab_journal_blocks),
};
}
/** * pack_vdo_component() - Convert a vdo_component to its packed on-disk representation. * @component: The VDO component data to convert. * * Return: The platform-independent representation of the component.
*/ staticstruct packed_vdo_component_41_0 pack_vdo_component(conststruct vdo_component component)
{ return (struct packed_vdo_component_41_0) {
.state = __cpu_to_le32(component.state),
.complete_recoveries = __cpu_to_le64(component.complete_recoveries),
.read_only_recoveries = __cpu_to_le64(component.read_only_recoveries),
.config = pack_vdo_config(component.config),
.nonce = __cpu_to_le64(component.nonce),
};
}
/** * unpack_vdo_config() - Convert a packed_vdo_config to its native in-memory representation. * @config: The packed vdo config to convert. * * Return: The native in-memory representation of the vdo config.
*/ staticstruct vdo_config unpack_vdo_config(struct packed_vdo_config config)
{ return (struct vdo_config) {
.logical_blocks = __le64_to_cpu(config.logical_blocks),
.physical_blocks = __le64_to_cpu(config.physical_blocks),
.slab_size = __le64_to_cpu(config.slab_size),
.recovery_journal_size = __le64_to_cpu(config.recovery_journal_size),
.slab_journal_blocks = __le64_to_cpu(config.slab_journal_blocks),
};
}
/** * unpack_vdo_component_41_0() - Convert a packed_vdo_component_41_0 to its native in-memory * representation. * @component: The packed vdo component data to convert. * * Return: The native in-memory representation of the component.
*/ staticstruct vdo_component unpack_vdo_component_41_0(struct packed_vdo_component_41_0 component)
{ return (struct vdo_component) {
.state = __le32_to_cpu(component.state),
.complete_recoveries = __le64_to_cpu(component.complete_recoveries),
.read_only_recoveries = __le64_to_cpu(component.read_only_recoveries),
.config = unpack_vdo_config(component.config),
.nonce = __le64_to_cpu(component.nonce),
};
}
/** * decode_vdo_component() - Decode the component data for the vdo itself out of the super block. * * Return: VDO_SUCCESS or an error.
*/ staticint decode_vdo_component(u8 *buffer, size_t *offset, struct vdo_component *component)
{ struct version_number version; struct packed_vdo_component_41_0 packed; int result;
decode_version_number(buffer, offset, &version);
result = validate_version(version, VDO_COMPONENT_DATA_41_0, "VDO component data"); if (result != VDO_SUCCESS) return result;
/*
 * NOTE(review): truncated -- `packed` is declared but never read from the
 * buffer, *component is never filled in, and there is no final return;
 * the rest of this function was lost in extraction.
 */
/**
 * vdo_validate_config() - Validate constraints on a VDO config.
 * @config: The VDO config.
 * @physical_block_count: The minimum block count of the underlying storage.
 * @logical_block_count: The expected logical size of the VDO, or 0 if the logical size may be
 *                       unspecified.
 *
 * Return: A success or error code.
 */
int vdo_validate_config(const struct vdo_config *config,
			block_count_t physical_block_count,
			block_count_t logical_block_count)
{
	struct slab_config slab_config;
	int result;

	/* Slab geometry checks come first; later checks depend on them. */
	result = VDO_ASSERT(config->slab_size > 0, "slab size unspecified");
	if (result != VDO_SUCCESS)
		return result;

	result = VDO_ASSERT(is_power_of_2(config->slab_size),
			    "slab size must be a power of two");
	if (result != VDO_SUCCESS)
		return result;

	result = VDO_ASSERT(config->slab_size <= (1 << MAX_VDO_SLAB_BITS),
			    "slab size must be less than or equal to 2^%d",
			    MAX_VDO_SLAB_BITS);
	if (result != VDO_SUCCESS)
		return result;

	result = VDO_ASSERT(config->slab_journal_blocks <= config->slab_size,
			    "slab journal size is within expected bound");
	if (result != VDO_SUCCESS)
		return result;

	/* Derive a slab configuration to confirm the sizes actually work. */
	result = vdo_configure_slab(config->slab_size, config->slab_journal_blocks,
				    &slab_config);
	if (result != VDO_SUCCESS)
		return result;

	result = VDO_ASSERT((slab_config.data_blocks >= 1),
			    "slab must be able to hold at least one block");
	if (result != VDO_SUCCESS)
		return result;

	result = VDO_ASSERT(config->physical_blocks > 0, "physical blocks unspecified");
	if (result != VDO_SUCCESS)
		return result;

	/* Note: this check deliberately maps the failure to VDO_OUT_OF_RANGE. */
	result = VDO_ASSERT(config->physical_blocks <= MAXIMUM_VDO_PHYSICAL_BLOCKS,
			    "physical block count %llu exceeds maximum %llu",
			    (unsigned long long) config->physical_blocks,
			    (unsigned long long) MAXIMUM_VDO_PHYSICAL_BLOCKS);
	if (result != VDO_SUCCESS)
		return VDO_OUT_OF_RANGE;

	if (physical_block_count != config->physical_blocks) {
		vdo_log_error("A physical size of %llu blocks was specified, not the %llu blocks configured in the vdo super block",
			      (unsigned long long) physical_block_count,
			      (unsigned long long) config->physical_blocks);
		return VDO_PARAMETER_MISMATCH;
	}

	/* A zero expected logical size means "don't check the logical size". */
	if (logical_block_count > 0) {
		result = VDO_ASSERT((config->logical_blocks > 0),
				    "logical blocks unspecified");
		if (result != VDO_SUCCESS)
			return result;

		if (logical_block_count != config->logical_blocks) {
			vdo_log_error("A logical size of %llu blocks was specified, but that differs from the %llu blocks configured in the vdo super block",
				      (unsigned long long) logical_block_count,
				      (unsigned long long) config->logical_blocks);
			return VDO_PARAMETER_MISMATCH;
		}
	}

	result = VDO_ASSERT(config->logical_blocks <= MAXIMUM_VDO_LOGICAL_BLOCKS,
			    "logical blocks too large");
	if (result != VDO_SUCCESS)
		return result;

	result = VDO_ASSERT(config->recovery_journal_size > 0,
			    "recovery journal size unspecified");
	if (result != VDO_SUCCESS)
		return result;

	result = VDO_ASSERT(is_power_of_2(config->recovery_journal_size),
			    "recovery journal size must be a power of two");
	if (result != VDO_SUCCESS)
		return result;

	return result;
}
/**
 * vdo_destroy_component_states() - Clean up any allocations in a vdo_component_states.
 * @states: The component states to destroy.
 */
void vdo_destroy_component_states(struct vdo_component_states *states)
{
	/* NULL is tolerated so callers can destroy unconditionally. */
	if (states != NULL)
		vdo_uninitialize_layout(&states->layout);
}
/** * decode_components() - Decode the components now that we know the component data is a version we * understand. * @buffer: The buffer being decoded. * @offset: The offset to start decoding from. * @geometry: The vdo geometry * @states: An object to hold the successfully decoded state. * * Return: VDO_SUCCESS or an error.
*/ staticint __must_check decode_components(u8 *buffer, size_t *offset, struct volume_geometry *geometry, struct vdo_component_states *states)
{ int result;
/*
 * NOTE(review): decode_layout() below consumes
 * states->vdo.config.physical_blocks, but nothing visible here decodes
 * states->vdo first -- a call decoding the vdo component itself appears
 * to have been dropped in extraction; confirm against the pristine file.
 */
result = decode_layout(buffer, offset, vdo_get_data_region_start(*geometry) + 1,
states->vdo.config.physical_blocks, &states->layout); if (result != VDO_SUCCESS) return result;
result = decode_recovery_journal_state_7_0(buffer, offset,
&states->recovery_journal); if (result != VDO_SUCCESS) return result;
result = decode_slab_depot_state_2_0(buffer, offset, &states->slab_depot); if (result != VDO_SUCCESS) return result;
result = decode_block_map_state_2_0(buffer, offset, &states->block_map); if (result != VDO_SUCCESS) return result;
VDO_ASSERT_LOG_ONLY(*offset == VDO_COMPONENT_DATA_OFFSET + VDO_COMPONENT_DATA_SIZE, "All decoded component data was used"); return VDO_SUCCESS;
}
/**
 * vdo_decode_component_states() - Decode the payload of a super block.
 * @buffer: The buffer containing the encoded super block contents.
 * @geometry: The vdo geometry
 * @states: A pointer to hold the decoded states.
 *
 * Return: VDO_SUCCESS or an error.
 */
int vdo_decode_component_states(u8 *buffer, struct volume_geometry *geometry,
				struct vdo_component_states *states)
{
	size_t offset = VDO_COMPONENT_DATA_OFFSET;
	int result;

	/* This is for backwards compatibility. */
	decode_u32_le(buffer, &offset, &states->unused);

	/* Check the VDO volume version */
	decode_version_number(buffer, &offset, &states->volume_version);
	result = validate_version(VDO_VOLUME_VERSION_67_0, states->volume_version,
				  "volume");
	if (result != VDO_SUCCESS)
		return result;

	result = decode_components(buffer, &offset, geometry, states);
	if (result != VDO_SUCCESS) {
		/* On failure, release anything the partial decode allocated. */
		vdo_uninitialize_layout(&states->layout);
	}

	return result;
}
/** * vdo_validate_component_states() - Validate the decoded super block configuration. * @states: The state decoded from the super block. * @geometry_nonce: The nonce from the geometry block. * @physical_size: The minimum block count of the underlying storage. * @logical_size: The expected logical size of the VDO, or 0 if the logical size may be * unspecified. * * Return: VDO_SUCCESS or an error if the configuration is invalid.
*/ int vdo_validate_component_states(struct vdo_component_states *states,
nonce_t geometry_nonce, block_count_t physical_size,
block_count_t logical_size)
{ if (geometry_nonce != states->vdo.nonce) { return vdo_log_error_strerror(VDO_BAD_NONCE, "Geometry nonce %llu does not match superblock nonce %llu",
(unsignedlonglong) geometry_nonce,
(unsignedlonglong) states->vdo.nonce);
}
/*
 * NOTE(review): garbled -- after the nonce check, the expected config
 * validation (the doc comment mentions @physical_size/@logical_size) is
 * missing; instead the assert below references an undeclared `offset` and
 * VDO_SECTOR_SIZE, which belongs to a superblock encoder fused in here.
 */
/* * Even though the buffer is a full block, to avoid the potential corruption from a torn * write, the entire encoding must fit in the first sector.
*/
VDO_ASSERT_LOG_ONLY(offset <= VDO_SECTOR_SIZE, "entire superblock must fit in one sector");
}
/** * vdo_decode_super_block() - Decode a super block from its on-disk representation.
*/ int vdo_decode_super_block(u8 *buffer)
{ struct header header; int result;
u32 checksum, saved_checksum;
size_t offset = 0;
/* Decode and validate the header. */
vdo_decode_header(buffer, &offset, &header);
result = vdo_validate_header(&SUPER_BLOCK_HEADER_12_0, &header, false, __func__); if (result != VDO_SUCCESS) return result;
if (header.size > VDO_COMPONENT_DATA_SIZE + sizeof(u32)) { /* * We can't check release version or checksum until we know the content size, so we * have to assume a version mismatch on unexpected values.
*/ return vdo_log_error_strerror(VDO_UNSUPPORTED_VERSION, "super block contents too large: %zu",
header.size);
}
/* Skip past the component data for now, to verify the checksum. */
offset += VDO_COMPONENT_DATA_SIZE;
/*
 * NOTE(review): truncated -- `checksum` and `saved_checksum` are declared
 * and the comment promises checksum verification, but the decode of the
 * saved checksum, the comparison, and the final return were lost in
 * extraction.
 */
/*
 * NOTE(review): stray website boilerplate (a German disclaimer) was fused
 * into this file during extraction; it is not part of the program. Preserved
 * below, commented out so it cannot be mistaken for code:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */