// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2023 Red Hat
*/
/* * This file contains the main entry points for normal operations on a vdo as well as functions for * constructing and destroying vdo instances (in memory).
*/
/** * DOC: * * A read_only_notifier has a single completion which is used to perform read-only notifications, * however, vdo_enter_read_only_mode() may be called from any thread. A pair of fields, protected * by a spinlock, are used to control the read-only mode entry process. The first field holds the * read-only error. The second is the state field, which may hold any of the four special values * enumerated here. * * When vdo_enter_read_only_mode() is called from some vdo thread, if the read_only_error field * already contains an error (i.e. its value is not VDO_SUCCESS), then some other error has already * initiated the read-only process, and nothing more is done. Otherwise, the new error is stored in * the read_only_error field, and the state field is consulted. If the state is MAY_NOTIFY, it is * set to NOTIFYING, and the notification process begins. If the state is MAY_NOT_NOTIFY, then * notifications are currently disallowed, generally due to the vdo being suspended. In this case, * the nothing more will be done until the vdo is resumed, at which point the notification will be * performed. In any other case, the vdo is already read-only, and there is nothing more to do.
*/
/* A linked list is adequate for the small number of entries we expect. */
struct device_registry {
	struct list_head links;
	/* TODO: Convert to rcu per kernel recommendation. */
	rwlock_t lock;
};

/* The single global registry of all vdo devices; guarded by its rwlock. */
static struct device_registry registry;
/** * vdo_initialize_device_registry_once() - Initialize the necessary structures for the device * registry.
*/ void vdo_initialize_device_registry_once(void)
{
INIT_LIST_HEAD(®istry.links);
rwlock_init(®istry.lock);
}
/** * filter_vdos_locked() - Find a vdo in the registry if it exists there. * @filter: The filter function to apply to devices. * @context: A bit of context to provide the filter. * * Context: Must be called holding the lock. * * Return: the vdo object found, if any.
*/ staticstruct vdo * __must_check filter_vdos_locked(vdo_filter_fn filter, constvoid *context)
{ struct vdo *vdo;
list_for_each_entry(vdo, ®istry.links, registration) { if (filter(vdo, context)) return vdo;
}
return NULL;
}
/**
 * vdo_find_matching() - Find and return the first (if any) vdo matching a given filter function.
 * @filter: The filter function to apply to vdos.
 * @context: A bit of context to provide the filter.
 *
 * Return: The first matching vdo, or NULL if no vdo matches.
 */
struct vdo *vdo_find_matching(vdo_filter_fn filter, const void *context)
{
	struct vdo *vdo;

	/*
	 * NOTE(review): the body of this function was lost in this copy of the file (it had been
	 * fused with an unrelated thread-id loop). Reconstructed from the documented contract:
	 * filter_vdos_locked() requires the registry lock, taken here in read mode.
	 */
	read_lock(&registry.lock);
	vdo = filter_vdos_locked(filter, context);
	read_unlock(&registry.lock);

	return vdo;
}
/** * initialize_thread_config() - Initialize the thread mapping * * If the logical, physical, and hash zone counts are all 0, a single thread will be shared by all * three plus the packer and recovery journal. Otherwise, there must be at least one of each type, * and each will have its own thread, as will the packer and recovery journal. * * Return: VDO_SUCCESS or an error.
*/ staticint __must_check initialize_thread_config(struct thread_count_config counts, struct thread_config *config)
{ int result; bool single = ((counts.logical_zones + counts.physical_zones + counts.hash_zones) == 0);
/**
 * read_geometry_block() - Synchronously read the geometry block from a vdo's underlying block
 *                         device.
 * @vdo: The vdo whose geometry is to be read.
 *
 * Return: VDO_SUCCESS or an error code.
 */
static int __must_check read_geometry_block(struct vdo *vdo)
{
	struct vio *vio;
	char *block;
	int result;

	result = vdo_allocate(VDO_BLOCK_SIZE, u8, __func__, &block);
	if (result != VDO_SUCCESS)
		return result;

	result = create_metadata_vio(vdo, VIO_TYPE_GEOMETRY, VIO_PRIORITY_HIGH, NULL,
				     block, &vio);
	if (result != VDO_SUCCESS)
		goto free_block;

	/*
	 * This is only safe because, having not already loaded the geometry, the vdo's geometry's
	 * bio_offset field is 0, so the fact that vio_reset_bio() will subtract that offset from
	 * the supplied pbn is not a problem.
	 */
	result = vio_reset_bio(vio, block, NULL, REQ_OP_READ,
			       VDO_GEOMETRY_BLOCK_LOCATION);
	if (result != VDO_SUCCESS) {
		free_vio(vdo_forget(vio));
		goto free_block;
	}

	/* Issue the read synchronously and translate the bio status into an errno. */
	bio_set_dev(vio->bio, vdo_get_backing_device(vdo));
	submit_bio_wait(vio->bio);
	result = blk_status_to_errno(vio->bio->bi_status);
	free_vio(vdo_forget(vio));
	if (result != 0) {
		vdo_log_error_strerror(result, "synchronous read failed");
		result = -EIO;
		goto free_block;
	}

	result = vdo_parse_geometry_block((u8 *) block, &vdo->geometry);

free_block:
	vdo_free(block);
	return result;
}
staticbool get_zone_thread_name(const thread_id_t thread_ids[], zone_count_t count,
thread_id_t id, constchar *prefix, char *buffer, size_t buffer_length)
{ if (id >= thread_ids[0]) {
thread_id_t index = id - thread_ids[0];
/** * get_thread_name() - Format the name of the worker thread desired to support a given work queue. * @thread_config: The thread configuration. * @thread_id: The thread id. * @buffer: Where to put the formatted name. * @buffer_length: Size of the output buffer. * * The physical layer may add a prefix identifying the product; the output from this function * should just identify the thread.
*/ staticvoid get_thread_name(conststruct thread_config *thread_config,
thread_id_t thread_id, char *buffer, size_t buffer_length)
{ if (thread_id == thread_config->journal_thread) { if (thread_config->packer_thread == thread_id) { /* * This is the "single thread" config where one thread is used for the * journal, packer, logical, physical, and hash zones. In that case, it is * known as the "request queue."
*/
snprintf(buffer, buffer_length, "reqQ"); return;
}
if (get_zone_thread_name(thread_config->logical_threads,
thread_config->logical_zone_count,
thread_id, "logQ", buffer, buffer_length)) return;
if (get_zone_thread_name(thread_config->physical_threads,
thread_config->physical_zone_count,
thread_id, "physQ", buffer, buffer_length)) return;
if (get_zone_thread_name(thread_config->hash_zone_threads,
thread_config->hash_zone_count,
thread_id, "hashQ", buffer, buffer_length)) return;
if (get_zone_thread_name(thread_config->bio_threads,
thread_config->bio_thread_count,
thread_id, "bioQ", buffer, buffer_length)) return;
/* Some sort of misconfiguration? */
snprintf(buffer, buffer_length, "reqQ%d", thread_id);
}
/** * vdo_make_thread() - Construct a single vdo work_queue and its associated thread (or threads for * round-robin queues). * @vdo: The vdo which owns the thread. * @thread_id: The id of the thread to create (as determined by the thread_config). * @type: The description of the work queue for this thread. * @queue_count: The number of actual threads/queues contained in the "thread". * @contexts: An array of queue_count contexts, one for each individual queue; may be NULL. * * Each "thread" constructed by this method is represented by a unique thread id in the thread * config, and completions can be enqueued to the queue and run on the threads comprising this * entity. * * Return: VDO_SUCCESS or an error.
*/ int vdo_make_thread(struct vdo *vdo, thread_id_t thread_id, conststruct vdo_work_queue_type *type, unsignedint queue_count, void *contexts[])
{ struct vdo_thread *thread = &vdo->threads[thread_id]; char queue_name[MAX_VDO_WORK_QUEUE_NAME_LEN];
if (type == NULL)
type = &default_queue_type;
if (thread->queue != NULL) { return VDO_ASSERT(vdo_work_queue_type_is(thread->queue, type), "already constructed vdo thread %u is of the correct type",
thread_id);
}
/** * register_vdo() - Register a VDO; it must not already be registered. * @vdo: The vdo to register. * * Return: VDO_SUCCESS or an error.
*/ staticint register_vdo(struct vdo *vdo)
{ int result;
write_lock(®istry.lock);
result = VDO_ASSERT(filter_vdos_locked(vdo_is_equal, vdo) == NULL, "VDO not already registered"); if (result == VDO_SUCCESS) {
INIT_LIST_HEAD(&vdo->registration);
list_add_tail(&vdo->registration, ®istry.links);
}
write_unlock(®istry.lock);
return result;
}
/** * initialize_vdo() - Do the portion of initializing a vdo which will clean up after itself on * error. * @vdo: The vdo being initialized * @config: The configuration of the vdo * @instance: The instance number of the vdo * @reason: The buffer to hold the failure reason on error
*/ staticint initialize_vdo(struct vdo *vdo, struct device_config *config, unsignedint instance, char **reason)
{ int result;
zone_count_t i;
/** * vdo_make() - Allocate and initialize a vdo. * @instance: Device instantiation counter. * @config: The device configuration. * @reason: The reason for any failure during this call. * @vdo_ptr: A pointer to hold the created vdo. * * Return: VDO_SUCCESS or an error.
*/ int vdo_make(unsignedint instance, struct device_config *config, char **reason, struct vdo **vdo_ptr)
{ int result; struct vdo *vdo;
/* Initialize with a generic failure reason to prevent returning garbage. */
*reason = "Unspecified error";
result = vdo_allocate(1, struct vdo, __func__, &vdo); if (result != VDO_SUCCESS) {
*reason = "Cannot allocate VDO"; return result;
}
result = initialize_vdo(vdo, config, instance, reason); if (result != VDO_SUCCESS) {
vdo_destroy(vdo); return result;
}
/* From here on, the caller will clean up if there is an error. */
*vdo_ptr = vdo;
for (i = 0; i < vdo->thread_config.thread_count; i++)
vdo_finish_work_queue(vdo->threads[i].queue);
}
/**
 * free_listeners() - Free the list of read-only listeners associated with a thread.
 * @thread: The thread holding the list to free.
 */
static void free_listeners(struct vdo_thread *thread)
{
	struct read_only_listener *entry = vdo_forget(thread->listeners);

	/* Walk the singly-linked list, detaching each node's link before freeing it. */
	while (entry != NULL) {
		struct read_only_listener *remaining = vdo_forget(entry->next);

		vdo_free(entry);
		entry = remaining;
	}
}
/** * unregister_vdo() - Remove a vdo from the device registry. * @vdo: The vdo to remove.
*/ staticvoid unregister_vdo(struct vdo *vdo)
{
write_lock(®istry.lock); if (filter_vdos_locked(vdo_is_equal, vdo) == vdo)
list_del_init(&vdo->registration);
write_unlock(®istry.lock);
}
/** * vdo_destroy() - Destroy a vdo instance. * @vdo: The vdo to destroy (may be NULL).
*/ void vdo_destroy(struct vdo *vdo)
{ unsignedint i;
if (vdo == NULL) return;
/* A running VDO should never be destroyed without suspending first. */
BUG_ON(vdo_get_admin_state(vdo)->normal);
if (vdo->threads != NULL) { for (i = 0; i < vdo->thread_config.thread_count; i++) {
free_listeners(&vdo->threads[i]);
vdo_free_work_queue(vdo_forget(vdo->threads[i].queue));
}
vdo_free(vdo_forget(vdo->threads));
}
uninitialize_thread_config(&vdo->thread_config);
if (vdo->compression_context != NULL) { for (i = 0; i < vdo->device_config->thread_counts.cpu_threads; i++)
vdo_free(vdo_forget(vdo->compression_context[i]));
/** * finish_reading_super_block() - Continue after loading the super block. * @completion: The super block vio. * * This callback is registered in vdo_load_super_block().
*/ staticvoid finish_reading_super_block(struct vdo_completion *completion)
{ struct vdo_super_block *super_block =
container_of(as_vio(completion), struct vdo_super_block, vio);
/**
 * handle_super_block_read_error() - Handle an error reading the super block.
 * @completion: The super block vio.
 *
 * This error handler is registered in vdo_load_super_block().
 */
static void handle_super_block_read_error(struct vdo_completion *completion)
{
	struct vio *vio = as_vio(completion);

	/* Record the failed metadata I/O, then continue through the normal read path. */
	vio_record_metadata_io_error(vio);
	finish_reading_super_block(completion);
}
/** * vdo_load_super_block() - Allocate a super block and read its contents from storage. * @vdo: The vdo containing the super block on disk. * @parent: The completion to notify after loading the super block.
*/ void vdo_load_super_block(struct vdo *vdo, struct vdo_completion *parent)
{ int result;
result = initialize_super_block(vdo, &vdo->super_block); if (result != VDO_SUCCESS) {
vdo_continue_completion(parent, result); return;
}
/** * vdo_get_backing_device() - Get the block device object underlying a vdo. * @vdo: The vdo. * * Return: The vdo's current block device.
*/ struct block_device *vdo_get_backing_device(conststruct vdo *vdo)
{ return vdo->device_config->owned_device->bdev;
}
/** * vdo_get_device_name() - Get the device name associated with the vdo target. * @target: The target device interface. * * Return: The block device name.
*/ constchar *vdo_get_device_name(conststruct dm_target *target)
{ return dm_device_name(dm_table_get_md(target->table));
}
/** * vdo_synchronous_flush() - Issue a flush request and wait for it to complete. * @vdo: The vdo. * * Return: VDO_SUCCESS or an error.
*/ int vdo_synchronous_flush(struct vdo *vdo)
{ int result; struct bio bio;
atomic64_inc(&vdo->stats.flush_out); if (result != 0) {
vdo_log_error_strerror(result, "synchronous flush failed");
result = -EIO;
}
bio_uninit(&bio); return result;
}
/** * vdo_get_state() - Get the current state of the vdo. * @vdo: The vdo. * * Context: This method may be called from any thread. * * Return: The current state of the vdo.
*/ enum vdo_state vdo_get_state(conststruct vdo *vdo)
{ enum vdo_state state = atomic_read(&vdo->state);
/* pairs with barriers where state field is changed */
smp_rmb(); return state;
}
/**
 * vdo_set_state() - Set the current state of the vdo.
 * @vdo: The vdo whose state is to be set.
 * @state: The new state of the vdo.
 *
 * Context: This method may be called from any thread.
 */
void vdo_set_state(struct vdo *vdo, enum vdo_state state)
{
	/*
	 * Pairs with the read barrier in vdo_get_state(); the write barrier must precede the
	 * store so readers that see the new state also see everything written before it.
	 */
	smp_wmb();
	atomic_set(&vdo->state, state);
}
/** * vdo_get_admin_state() - Get the admin state of the vdo. * @vdo: The vdo. * * Return: The code for the vdo's current admin state.
*/ conststruct admin_state_code *vdo_get_admin_state(conststruct vdo *vdo)
{ return vdo_get_admin_state_code(&vdo->admin.state);
}
/**
 * record_vdo() - Record the state of the VDO for encoding in the super block.
 * @vdo: The vdo whose component states are to be captured.
 */
static void record_vdo(struct vdo *vdo)
{
	/* This is for backwards compatibility. */
	vdo->states.unused = vdo->geometry.unused;

	/* Snapshot each component's serializable state. */
	vdo->states.vdo.state = vdo_get_state(vdo);
	vdo->states.block_map = vdo_record_block_map(vdo->block_map);
	vdo->states.recovery_journal = vdo_record_recovery_journal(vdo->recovery_journal);
	vdo->states.slab_depot = vdo_record_slab_depot(vdo->depot);
	vdo->states.layout = vdo->layout;
}
/**
 * continue_super_block_parent() - Continue the parent of a super block save operation.
 * @completion: The super block vio.
 *
 * This callback is registered in vdo_save_components().
 */
static void continue_super_block_parent(struct vdo_completion *completion)
{
	/* Detach the parent before continuing it with this completion's result. */
	struct vdo_completion *parent = vdo_forget(completion->parent);

	vdo_continue_completion(parent, completion->result);
}
/**
 * handle_save_error() - Log a super block save error.
 * @completion: The super block vio.
 *
 * This error handler is registered in vdo_save_components().
 */
static void handle_save_error(struct vdo_completion *completion)
{
	struct vio *vio = as_vio(completion);
	struct vdo_super_block *super_block =
		container_of(vio, struct vdo_super_block, vio);

	vio_record_metadata_io_error(&super_block->vio);
	vdo_log_error_strerror(completion->result, "super block save failed");

	/*
	 * Mark the super block as unwritable so that we won't attempt to write it again. This
	 * avoids the case where a growth attempt fails writing the super block with the new size,
	 * but the subsequent attempt to write out the read-only state succeeds. In this case,
	 * writes which happened just before the suspend would not be visible if the VDO is
	 * restarted without rebuilding, but, after a read-only rebuild, the effects of those
	 * writes would reappear.
	 */
	super_block->unwritable = true;
	completion->callback(completion);
}
/** * vdo_save_components() - Encode the vdo and save the super block asynchronously. * @vdo: The vdo whose state is being saved. * @parent: The completion to notify when the save is complete.
*/ void vdo_save_components(struct vdo *vdo, struct vdo_completion *parent)
{ struct vdo_super_block *super_block = &vdo->super_block;
if (super_block->unwritable) {
vdo_continue_completion(parent, VDO_READ_ONLY); return;
}
if (super_block->vio.completion.parent != NULL) {
vdo_continue_completion(parent, VDO_COMPONENT_BUSY); return;
}
/** * vdo_register_read_only_listener() - Register a listener to be notified when the VDO goes * read-only. * @vdo: The vdo to register with. * @listener: The object to notify. * @notification: The function to call to send the notification. * @thread_id: The id of the thread on which to send the notification. * * Return: VDO_SUCCESS or an error.
*/ int vdo_register_read_only_listener(struct vdo *vdo, void *listener,
vdo_read_only_notification_fn notification,
thread_id_t thread_id)
{ struct vdo_thread *thread = &vdo->threads[thread_id]; struct read_only_listener *read_only_listener; int result;
result = VDO_ASSERT(thread_id != vdo->thread_config.dedupe_thread, "read only listener not registered on dedupe thread"); if (result != VDO_SUCCESS) return result;
result = vdo_allocate(1, struct read_only_listener, __func__,
&read_only_listener); if (result != VDO_SUCCESS) return result;
/** * notify_vdo_of_read_only_mode() - Notify a vdo that it is going read-only. * @listener: The vdo. * @parent: The completion to notify in order to acknowledge the notification. * * This will save the read-only state to the super block. * * Implements vdo_read_only_notification_fn.
*/ staticvoid notify_vdo_of_read_only_mode(void *listener, struct vdo_completion *parent)
{ struct vdo *vdo = listener;
if (vdo_in_read_only_mode(vdo))
vdo_finish_completion(parent);
/** * vdo_enable_read_only_entry() - Enable a vdo to enter read-only mode on errors. * @vdo: The vdo to enable. * * Return: VDO_SUCCESS or an error.
*/ int vdo_enable_read_only_entry(struct vdo *vdo)
{
thread_id_t id; bool is_read_only = vdo_in_read_only_mode(vdo); struct read_only_notifier *notifier = &vdo->read_only_notifier;
/** * vdo_wait_until_not_entering_read_only_mode() - Wait until no read-only notifications are in * progress and prevent any subsequent * notifications. * @parent: The completion to notify when no threads are entering read-only mode. * * Notifications may be re-enabled by calling vdo_allow_read_only_mode_entry().
*/ void vdo_wait_until_not_entering_read_only_mode(struct vdo_completion *parent)
{ struct vdo *vdo = parent->vdo; struct read_only_notifier *notifier = &vdo->read_only_notifier;
vdo_assert_on_admin_thread(vdo, __func__);
if (notifier->waiter != NULL) {
vdo_continue_completion(parent, VDO_COMPONENT_BUSY); return;
}
if (notifier->waiter == NULL) { /* * A notification was not in progress, and now they are * disallowed.
*/
vdo_launch_completion(parent); return;
}
}
/** * as_notifier() - Convert a generic vdo_completion to a read_only_notifier. * @completion: The completion to convert. * * Return: The completion as a read_only_notifier.
*/ staticinlinestruct read_only_notifier *as_notifier(struct vdo_completion *completion)
{
vdo_assert_completion_type(completion, VDO_READ_ONLY_MODE_COMPLETION); return container_of(completion, struct read_only_notifier, completion);
}
/** * finish_entering_read_only_mode() - Complete the process of entering read only mode. * @completion: The read-only mode completion.
*/ staticvoid finish_entering_read_only_mode(struct vdo_completion *completion)
{ struct read_only_notifier *notifier = as_notifier(completion);
if (listener != NULL) { /* We have a listener to notify */
vdo_prepare_completion(completion, make_thread_read_only,
make_thread_read_only, thread_id,
listener);
listener->notify(listener->listener, completion); return;
}
/* We're done with this thread */ if (++thread_id == vdo->thread_config.dedupe_thread) { /* * We don't want to notify the dedupe thread since it may be * blocked rebuilding the index.
*/
thread_id++;
}
if (thread_id >= vdo->thread_config.thread_count) { /* There are no more threads */
vdo_prepare_completion(completion, finish_entering_read_only_mode,
finish_entering_read_only_mode,
vdo->thread_config.admin_thread, NULL);
} else {
vdo_prepare_completion(completion, make_thread_read_only,
make_thread_read_only, thread_id, NULL);
}
vdo_launch_completion(completion);
}
/** * vdo_allow_read_only_mode_entry() - Allow the notifier to put the VDO into read-only mode, * reversing the effects of * vdo_wait_until_not_entering_read_only_mode(). * @parent: The object to notify once the operation is complete. * * If some thread tried to put the vdo into read-only mode while notifications were disallowed, it * will be done when this method is called. If that happens, the parent will not be notified until * the vdo has actually entered read-only mode and attempted to save the super block. * * Context: This method may only be called from the admin thread.
*/ void vdo_allow_read_only_mode_entry(struct vdo_completion *parent)
{ struct vdo *vdo = parent->vdo; struct read_only_notifier *notifier = &vdo->read_only_notifier;
vdo_assert_on_admin_thread(vdo, __func__);
if (notifier->waiter != NULL) {
vdo_continue_completion(parent, VDO_COMPONENT_BUSY); return;
}
/* Do the pending notification. */
make_thread_read_only(¬ifier->completion);
}
/** * vdo_enter_read_only_mode() - Put a VDO into read-only mode and save the read-only state in the * super block. * @vdo: The vdo. * @error_code: The error which caused the VDO to enter read-only mode. * * This method is a no-op if the VDO is already read-only.
*/ void vdo_enter_read_only_mode(struct vdo *vdo, int error_code)
{ bool notify = false;
thread_id_t thread_id = vdo_get_callback_thread_id(); struct read_only_notifier *notifier = &vdo->read_only_notifier; struct vdo_thread *thread;
if (thread_id != VDO_INVALID_THREAD_ID) {
thread = &vdo->threads[thread_id]; if (thread->is_read_only) { /* This thread has already gone read-only. */ return;
}
/* Record for this thread that the VDO is read-only. */
thread->is_read_only = true;
}
if (!notify) { /* The notifier is already aware of a read-only error */ return;
}
/* Initiate a notification starting on the lowest numbered thread. */
vdo_launch_completion_callback(¬ifier->completion, make_thread_read_only, 0);
}
/** * vdo_is_read_only() - Check whether the VDO is read-only. * @vdo: The vdo. * * Return: true if the vdo is read-only. * * This method may be called from any thread, as opposed to examining the VDO's state field which * is only safe to check from the admin thread.
*/ bool vdo_is_read_only(struct vdo *vdo)
{ return vdo->threads[vdo_get_callback_thread_id()].is_read_only;
}
/** * vdo_in_read_only_mode() - Check whether a vdo is in read-only mode. * @vdo: The vdo to query. * * Return: true if the vdo is in read-only mode.
*/ bool vdo_in_read_only_mode(conststruct vdo *vdo)
{ return (vdo_get_state(vdo) == VDO_READ_ONLY_MODE);
}
/** * vdo_in_recovery_mode() - Check whether the vdo is in recovery mode. * @vdo: The vdo to query. * * Return: true if the vdo is in recovery mode.
*/ bool vdo_in_recovery_mode(conststruct vdo *vdo)
{ return (vdo_get_state(vdo) == VDO_RECOVERING);
}
/** * vdo_enter_recovery_mode() - Put the vdo into recovery mode. * @vdo: The vdo.
*/ void vdo_enter_recovery_mode(struct vdo *vdo)
{
vdo_assert_on_admin_thread(vdo, __func__);
/** * complete_synchronous_action() - Signal the waiting thread that a synchronous action is complete. * @completion: The sync completion.
*/ staticvoid complete_synchronous_action(struct vdo_completion *completion)
{
vdo_assert_completion_type(completion, VDO_SYNC_COMPLETION);
complete(&(container_of(completion, struct sync_completion,
vdo_completion)->completion));
}
/** * perform_synchronous_action() - Launch an action on a VDO thread and wait for it to complete. * @vdo: The vdo. * @action: The callback to launch. * @thread_id: The thread on which to run the action. * @parent: The parent of the sync completion (may be NULL).
*/ staticint perform_synchronous_action(struct vdo *vdo, vdo_action_fn action,
thread_id_t thread_id, void *parent)
{ struct sync_completion sync;
/** * set_compression_callback() - Callback to turn compression on or off. * @completion: The completion.
*/ staticvoid set_compression_callback(struct vdo_completion *completion)
{ struct vdo *vdo = completion->vdo; bool *enable = completion->parent; bool was_enabled = vdo_get_compressing(vdo);
if (*enable != was_enabled) {
WRITE_ONCE(vdo->compressing, *enable); if (was_enabled) { /* Signal the packer to flush since compression has been disabled. */
vdo_flush_packer(vdo->packer);
}
}
/**
 * vdo_set_compressing() - Turn compression on or off.
 * @vdo: The vdo.
 * @enable: Whether to enable or disable compression.
 *
 * Return: Whether compression was previously on or off.
 */
bool vdo_set_compressing(struct vdo *vdo, bool enable)
{
	/*
	 * The callback runs on the packer thread and, per the contract above, leaves the
	 * previous setting in enable -- presumably by writing it back in place; confirm
	 * against set_compression_callback().
	 */
	perform_synchronous_action(vdo, set_compression_callback,
				   vdo->thread_config.packer_thread, &enable);
	return enable;
}
/**
 * vdo_get_compressing() - Get whether compression is enabled in a vdo.
 * @vdo: The vdo.
 *
 * Return: State of compression.
 */
bool vdo_get_compressing(struct vdo *vdo)
{
	return READ_ONCE(vdo->compressing);
}
staticstruct error_statistics __must_check get_vdo_error_statistics(conststruct vdo *vdo)
{ /* * The error counts can be incremented from arbitrary threads and so must be incremented * atomically, but they are just statistics with no semantics that could rely on memory * order, so unfenced reads are sufficient.
*/ conststruct atomic_statistics *atoms = &vdo->stats;
/**
 * vdo_get_physical_blocks_allocated() - Get the number of physical blocks in use by user data.
 * @vdo: The vdo.
 *
 * Return: The number of blocks allocated for user data.
 */
static block_count_t __must_check vdo_get_physical_blocks_allocated(const struct vdo *vdo)
{
	block_count_t depot_allocated = vdo_get_slab_depot_allocated_blocks(vdo->depot);
	block_count_t block_map_blocks =
		vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal);

	/* Block map data blocks are counted as overhead, not user data, so subtract them. */
	return depot_allocated - block_map_blocks;
}
/**
 * vdo_get_physical_blocks_overhead() - Get the number of physical blocks used by vdo metadata.
 * @vdo: The vdo.
 *
 * Return: The number of overhead blocks.
 */
static block_count_t __must_check vdo_get_physical_blocks_overhead(const struct vdo *vdo)
{
	/*
	 * config.physical_blocks is mutated during resize and is in a packed structure,
	 * but resize runs on admin thread.
	 * TODO: Verify that this is always safe.
	 */
	block_count_t total = vdo->states.vdo.config.physical_blocks;
	block_count_t data_blocks = vdo_get_slab_depot_data_blocks(vdo->depot);
	block_count_t block_map_blocks =
		vdo_get_journal_block_map_data_blocks_used(vdo->recovery_journal);

	return total - data_blocks + block_map_blocks;
}
/**
 * vdo_describe_state() - Return a short human-readable name for a vdo state.
 * @state: The state to describe.
 *
 * Return: A static string naming the state.
 */
static const char *vdo_describe_state(enum vdo_state state)
{
	/* These strings should all fit in the 15 chars of VDOStatistics.mode. */
	switch (state) {
	case VDO_RECOVERING:
		return "recovering";
	case VDO_READ_ONLY_MODE:
		return "read-only";
	default:
		return "normal";
	}
}
/** * get_vdo_statistics() - Populate a vdo_statistics structure on the admin thread. * @vdo: The vdo. * @stats: The statistics structure to populate.
*/ staticvoid get_vdo_statistics(conststruct vdo *vdo, struct vdo_statistics *stats)
{ struct recovery_journal *journal = vdo->recovery_journal; enum vdo_state state = vdo_get_state(vdo);
vdo_assert_on_admin_thread(vdo, __func__);
/* start with a clean slate */
memset(stats, 0, sizeof(struct vdo_statistics));
/* * These are immutable properties of the vdo object, so it is safe to query them from any * thread.
*/
stats->version = STATISTICS_VERSION;
stats->logical_blocks = vdo->states.vdo.config.logical_blocks; /* * config.physical_blocks is mutated during resize and is in a packed structure, but resize * runs on the admin thread. * TODO: verify that this is always safe
*/
stats->physical_blocks = vdo->states.vdo.config.physical_blocks;
stats->block_size = VDO_BLOCK_SIZE;
stats->complete_recoveries = vdo->states.vdo.complete_recoveries;
stats->read_only_recoveries = vdo->states.vdo.read_only_recoveries;
stats->block_map_cache_size = get_block_map_cache_size(vdo);
/**
 * vdo_fetch_statistics_callback() - Action to populate a vdo_statistics structure on the admin
 *                                   thread.
 * @completion: The completion.
 *
 * This callback is registered in vdo_fetch_statistics().
 */
static void vdo_fetch_statistics_callback(struct vdo_completion *completion)
{
	/* The caller stashed its output buffer in the completion's parent. */
	struct vdo_statistics *stats = completion->parent;

	get_vdo_statistics(completion->vdo, stats);
	complete_synchronous_action(completion);
}
/**
 * vdo_fetch_statistics() - Fetch statistics on the correct thread.
 * @vdo: The vdo.
 * @stats: The vdo statistics are returned here.
 */
void vdo_fetch_statistics(struct vdo *vdo, struct vdo_statistics *stats)
{
	/* Statistics must be gathered on the admin thread; block until the action completes. */
	perform_synchronous_action(vdo, vdo_fetch_statistics_callback,
				   vdo->thread_config.admin_thread, stats);
}
/** * vdo_get_callback_thread_id() - Get the id of the callback thread on which a completion is * currently running. * * Return: The current thread ID, or -1 if no such thread.
*/
thread_id_t vdo_get_callback_thread_id(void)
{ struct vdo_work_queue *queue = vdo_get_current_work_queue(); struct vdo_thread *thread;
thread_id_t thread_id;
if (PARANOID_THREAD_CONSISTENCY_CHECKS) {
BUG_ON(thread_id >= thread->vdo->thread_config.thread_count);
BUG_ON(thread != &thread->vdo->threads[thread_id]);
}
return thread_id;
}
/** * vdo_dump_status() - Dump status information about a vdo to the log for debugging. * @vdo: The vdo to dump.
*/ void vdo_dump_status(conststruct vdo *vdo)
{
zone_count_t zone;
for (zone = 0; zone < vdo->thread_config.logical_zone_count; zone++)
vdo_dump_logical_zone(&vdo->logical_zones->zones[zone]);
for (zone = 0; zone < vdo->thread_config.physical_zone_count; zone++)
vdo_dump_physical_zone(&vdo->physical_zones->zones[zone]);
vdo_dump_hash_zones(vdo->hash_zones);
}
/** * vdo_assert_on_admin_thread() - Assert that we are running on the admin thread. * @vdo: The vdo. * @name: The name of the function which should be running on the admin thread (for logging).
*/ void vdo_assert_on_admin_thread(conststruct vdo *vdo, constchar *name)
{
VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == vdo->thread_config.admin_thread), "%s called on admin thread", name);
}
/**
 * vdo_assert_on_logical_zone_thread() - Assert that this function was called on the specified
 *                                       logical zone thread.
 * @vdo: The vdo.
 * @logical_zone: The number of the logical zone.
 * @name: The name of the calling function.
 */
void vdo_assert_on_logical_zone_thread(const struct vdo *vdo, zone_count_t logical_zone,
				       const char *name)
{
	thread_id_t expected = vdo->thread_config.logical_threads[logical_zone];

	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == expected),
			    "%s called on logical thread", name);
}
/**
 * vdo_assert_on_physical_zone_thread() - Assert that this function was called on the specified
 *                                        physical zone thread.
 * @vdo: The vdo.
 * @physical_zone: The number of the physical zone.
 * @name: The name of the calling function.
 */
void vdo_assert_on_physical_zone_thread(const struct vdo *vdo,
					zone_count_t physical_zone, const char *name)
{
	thread_id_t expected = vdo->thread_config.physical_threads[physical_zone];

	VDO_ASSERT_LOG_ONLY((vdo_get_callback_thread_id() == expected),
			    "%s called on physical thread", name);
}
/**
 * vdo_get_physical_zone() - Get the physical zone responsible for a given physical block number.
 * @vdo: The vdo containing the physical zones.
 * @pbn: The PBN of the data block.
 * @zone_ptr: A pointer to return the physical zone.
 *
 * Gets the physical zone responsible for a given physical block number of a data block in this vdo
 * instance, or of the zero block (for which a NULL zone is returned). For any other block number
 * that is not in the range of valid data block numbers in any slab, an error will be returned.
 * This function is safe to call on invalid block numbers; it will not put the vdo into read-only
 * mode.
 *
 * Return: VDO_SUCCESS or VDO_OUT_OF_RANGE if the block number is invalid or an error code for any
 *         other failure.
 */
int vdo_get_physical_zone(const struct vdo *vdo, physical_block_number_t pbn,
			  struct physical_zone **zone_ptr)
{
	struct vdo_slab *slab;
	int result;

	/*
	 * NOTE(review): the tail of this function was lost in this copy of the file (it ran into
	 * unrelated pasted text). The zero-block branch and the final zone assignment are
	 * reconstructed from the documented contract above -- confirm against upstream.
	 */
	if (pbn == VDO_ZERO_BLOCK) {
		*zone_ptr = NULL;
		return VDO_SUCCESS;
	}

	/*
	 * Used because it does a more restrictive bounds check than vdo_get_slab(), and done first
	 * because it won't trigger read-only mode on an invalid PBN.
	 */
	if (!vdo_is_physical_data_block(vdo->depot, pbn))
		return VDO_OUT_OF_RANGE;

	/* With the PBN already checked, we should always succeed in finding a slab. */
	slab = vdo_get_slab(vdo->depot, pbn);
	result = VDO_ASSERT(slab != NULL, "vdo_get_slab must succeed on all valid PBNs");
	if (result != VDO_SUCCESS)
		return result;

	*zone_ptr = &vdo->physical_zones->zones[slab->allocator->zone_number];
	return VDO_SUCCESS;
}