/* A position in the arboreal block map at a specific level. */
struct block_map_tree_slot {
	/* The index of the page at this level of the tree */
	page_number_t page_index;
	/* The (pbn, slot) address of the entry within that page */
	struct block_map_slot block_map_slot;
};
/* Fields for using the arboreal block map. */
struct tree_lock {
	/* The current height at which this data_vio is operating */
	height_t height;
	/* The block map tree for this LBN */
	root_count_t root_index;
	/* Whether we hold a page lock */
	bool locked;
	/* The key for the lock map */
	u64 key;
	/* The queue of waiters for the page this vio is allocating or loading */
	struct vdo_wait_queue waiters;
	/* The block map tree slots for this LBN, one per tree level plus the leaf */
	struct block_map_tree_slot tree_slots[VDO_BLOCK_MAP_TREE_HEIGHT + 1];
};
/*
 * Where a data_vio is on the compression path; advance_compression_stage() depends on the order of
 * this enum.
 */
enum data_vio_compression_stage {
	/* A data_vio which has not yet entered the compression path */
	DATA_VIO_PRE_COMPRESSOR,
	/* A data_vio which is in the compressor */
	DATA_VIO_COMPRESSING,
	/* A data_vio which is blocked in the packer */
	DATA_VIO_PACKING,
	/* A data_vio which is no longer on the compression path (and never will be) */
	DATA_VIO_POST_PACKER,
};
/* Per-data_vio state for the compression and packing path. */
struct compression_state {
	/*
	 * The current compression status of this data_vio. This field contains a value which
	 * consists of a data_vio_compression_stage and a flag indicating whether a request has
	 * been made to cancel (or prevent) compression for this data_vio.
	 *
	 * This field should be accessed through the get_data_vio_compression_status() and
	 * set_data_vio_compression_status() methods. It should not be accessed directly.
	 */
	atomic_t status;

	/* The compressed size of this block */
	u16 size;

	/* The packer input or output bin slot which holds the enclosing data_vio */
	slot_number_t slot;

	/* The packer bin to which the enclosing data_vio has been assigned */
	struct packer_bin *bin;

	/* A link in the chain of data_vios which have been packed together */
	struct data_vio *next_in_batch;

	/* A vio which is blocked in the packer while holding a lock this vio needs. */
	struct data_vio *lock_holder;

	/*
	 * The compressed block used to hold the compressed form of this block and that of any
	 * other blocks for which this data_vio is the compressed write agent.
	 */
	struct compressed_block *block;
};
/* Fields supporting allocation of data blocks. */
struct allocation {
	/* The physical zone in which to allocate a physical block */
	struct physical_zone *zone;

	/* The block allocated to this vio */
	physical_block_number_t pbn;

	/*
	 * If non-NULL, the pooled PBN lock held on the allocated block. Must be a write lock until
	 * the block has been written, after which it will become a read lock.
	 */
	struct pbn_lock *lock;

	/* The type of write lock to obtain on the allocated block */
	enum pbn_lock_type write_lock_type;

	/* The zone which was the start of the current allocation cycle */
	zone_count_t first_allocation_zone;

	/* Whether this vio should wait for a clean slab */
	bool wait_for_clean_slab;
};
/* A vio for processing user data requests. */
struct data_vio {
	/* The vdo_wait_queue entry structure */
	struct vdo_waiter waiter;

	/* The logical block of this request */
	struct lbn_lock logical;

	/* The state for traversing the block map tree */
	struct tree_lock tree_lock;

	/* The current partition address of this block */
	struct zoned_pbn mapped;

	/* The hash of this vio (if not zero) */
	struct uds_record_name record_name;

	/* Used for logging and debugging */
	enum async_operation_number last_async_operation;

	/* The operations to record in the recovery and slab journals */
	struct reference_updater increment_updater;
	struct reference_updater decrement_updater;

	/*
	 * Whether this vio has received an allocation. This field is examined from threads not in
	 * the allocation zone.
	 */
	bool allocation_succeeded;

	/* The new partition address of this block after the vio write completes */
	struct zoned_pbn new_mapped;

	/* The hash zone responsible for the name (NULL if is_zero_block) */
	struct hash_zone *hash_zone;

	/* The lock this vio holds or shares with other vios with the same data */
	struct hash_lock *hash_lock;

	/* All data_vios sharing a hash lock are kept in a list linking these list entries */
	struct list_head hash_lock_entry;

	/* The block number in the partition of the UDS deduplication advice */
	struct zoned_pbn duplicate;

	/*
	 * The sequence number of the recovery journal block containing the increment entry for
	 * this vio.
	 */
	sequence_number_t recovery_sequence_number;

	/* The point in the recovery journal where this write last made an entry */
	struct journal_point recovery_journal_point;

	/* The list of vios in user initiated write requests */
	struct list_head write_entry;

	/* The generation number of the VDO that this vio belongs to */
	sequence_number_t flush_generation;

	/* The completion to use for fetching block map pages for this vio */
	struct vdo_page_completion page_completion;

	/* The user bio that initiated this VIO */
	struct bio *user_bio;

	/* partial block support */
	block_size_t offset;

	/*
	 * The number of bytes to be discarded. For discards, this field will always be positive,
	 * whereas for non-discards it will always be 0. Hence it can be used to determine whether
	 * a data_vio is processing a discard, even after the user_bio has been acknowledged.
	 */
	u32 remaining_discard;

	/* The dedupe context in use while querying the index (if any) */
	struct dedupe_context *dedupe_context;

	/* Fields beyond this point will not be reset when a pooled data_vio is reused. */

	/* The embedded vio providing the underlying I/O machinery */
	struct vio vio;

	/* The completion for making reference count decrements */
	struct vdo_completion decrement_completion;

	/* All of the fields necessary for the compression path */
	struct compression_state compression;

	/* A block used as output during compression or uncompression */
	char *scratch_block;

	/* A link in the data_vio pool's free list */
	struct list_head pool_entry;
};
staticinlinestruct data_vio *vio_as_data_vio(struct vio *vio)
{
VDO_ASSERT_LOG_ONLY((vio->type == VIO_TYPE_DATA), "vio is a data_vio"); return container_of(vio, struct data_vio, vio);
}
/** * continue_data_vio_with_error() - Set an error code and then continue processing a data_vio. * * This will not mask older errors. This function can be called with a success code, but it is more * efficient to call continue_data_vio() if the caller knows the result was a success.
*/ staticinlinevoid continue_data_vio_with_error(struct data_vio *data_vio, int result)
{
vdo_continue_completion(&data_vio->vio.completion, result);
}
/* Assert (log-only) that this data_vio is running on its hash zone's thread. */
static inline void assert_data_vio_in_hash_zone(struct data_vio *data_vio)
{
	thread_id_t expected = data_vio->hash_zone->thread_id;
	thread_id_t thread_id = vdo_get_callback_thread_id();

	/*
	 * It's odd to use the LBN, but converting the record name to hex is a bit clunky for an
	 * inline, and the LBN better than nothing as an identifier.
	 */
	VDO_ASSERT_LOG_ONLY((expected == thread_id),
			    "data_vio for logical block %llu on thread %u, should be on hash zone thread %u",
			    (unsigned long long) data_vio->logical.lbn, thread_id, expected);
}
/**
 * launch_data_vio_hash_zone_callback() - Set a callback as a hash zone operation and invoke it
 *                                        immediately.
 */
static inline void launch_data_vio_hash_zone_callback(struct data_vio *data_vio,
						      vdo_action_fn callback)
{
	set_data_vio_hash_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}
VDO_ASSERT_LOG_ONLY((expected == thread_id), "data_vio for logical block %llu on thread %u, should be on thread %u",
(unsignedlonglong) data_vio->logical.lbn, thread_id, expected);
}
/**
 * launch_data_vio_logical_callback() - Set a callback as a logical block operation and invoke it
 *                                      immediately.
 */
static inline void launch_data_vio_logical_callback(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	set_data_vio_logical_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}
VDO_ASSERT_LOG_ONLY((expected == thread_id), "struct data_vio for allocated physical block %llu on thread %u, should be on thread %u",
(unsignedlonglong) data_vio->allocation.pbn, thread_id,
expected);
}
/**
 * launch_data_vio_allocated_zone_callback() - Set a callback as a physical block operation in a
 *                                             data_vio's allocated zone and invoke it
 *                                             immediately.
 */
static inline void launch_data_vio_allocated_zone_callback(struct data_vio *data_vio,
							   vdo_action_fn callback)
{
	set_data_vio_allocated_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}
VDO_ASSERT_LOG_ONLY((expected == thread_id), "data_vio for duplicate physical block %llu on thread %u, should be on thread %u",
(unsignedlonglong) data_vio->duplicate.pbn, thread_id,
expected);
}
/**
 * launch_data_vio_duplicate_zone_callback() - Set a callback as a physical block operation in a
 *                                             data_vio's duplicate zone and invoke it
 *                                             immediately.
 */
static inline void launch_data_vio_duplicate_zone_callback(struct data_vio *data_vio,
							   vdo_action_fn callback)
{
	set_data_vio_duplicate_zone_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}
VDO_ASSERT_LOG_ONLY((expected == thread_id), "data_vio for mapped physical block %llu on thread %u, should be on thread %u",
(unsignedlonglong) data_vio->mapped.pbn, thread_id, expected);
}
VDO_ASSERT_LOG_ONLY((expected == thread_id), "data_vio for new_mapped physical block %llu on thread %u, should be on thread %u",
(unsignedlonglong) data_vio->new_mapped.pbn, thread_id,
expected);
}
VDO_ASSERT_LOG_ONLY((journal_thread == thread_id), "data_vio for logical block %llu on thread %u, should be on journal thread %u",
(unsignedlonglong) data_vio->logical.lbn, thread_id,
journal_thread);
}
/**
 * launch_data_vio_journal_callback() - Set a callback as a journal operation and invoke it
 *                                      immediately.
 */
static inline void launch_data_vio_journal_callback(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	set_data_vio_journal_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}
VDO_ASSERT_LOG_ONLY((packer_thread == thread_id), "data_vio for logical block %llu on thread %u, should be on packer thread %u",
(unsignedlonglong) data_vio->logical.lbn, thread_id,
packer_thread);
}
/**
 * launch_data_vio_packer_callback() - Set a callback as a packer operation and invoke it
 *                                     immediately.
 */
static inline void launch_data_vio_packer_callback(struct data_vio *data_vio,
						   vdo_action_fn callback)
{
	set_data_vio_packer_callback(data_vio, callback);
	vdo_launch_completion(&data_vio->vio.completion);
}
VDO_ASSERT_LOG_ONLY((cpu_thread == thread_id), "data_vio for logical block %llu on thread %u, should be on cpu thread %u",
(unsignedlonglong) data_vio->logical.lbn, thread_id,
cpu_thread);
}
/**
 * launch_data_vio_cpu_callback() - Set a callback to run on the CPU queues and invoke it
 *                                  immediately.
 */
static inline void launch_data_vio_cpu_callback(struct data_vio *data_vio,
						vdo_action_fn callback,
						enum vdo_completion_priority priority)
{
	set_data_vio_cpu_callback(data_vio, callback);
	vdo_launch_completion_with_priority(&data_vio->vio.completion, priority);
}
/**
 * launch_data_vio_bio_zone_callback() - Set a callback as a bio zone operation and invoke it
 *                                       immediately.
 */
static inline void launch_data_vio_bio_zone_callback(struct data_vio *data_vio,
						     vdo_action_fn callback)
{
	set_data_vio_bio_zone_callback(data_vio, callback);
	vdo_launch_completion_with_priority(&data_vio->vio.completion,
					    BIO_Q_DATA_PRIORITY);
}
/**
 * launch_data_vio_on_bio_ack_queue() - If the vdo uses a bio_ack queue, set a callback to run on
 *                                      it and invoke it immediately, otherwise, just run the
 *                                      callback on the current thread.
 */
static inline void launch_data_vio_on_bio_ack_queue(struct data_vio *data_vio,
						    vdo_action_fn callback)
{
	struct vdo_completion *completion = &data_vio->vio.completion;
	struct vdo *vdo = completion->vdo;

	if (!vdo_uses_bio_ack_queue(vdo)) {
		callback(completion);
		return;
	}

	/*
	 * NOTE(review): the tail of this function was replaced by unrelated junk text in the
	 * extracted source; the queuing path below is reconstructed from the documented intent
	 * ("set a callback to run on the bio_ack queue and invoke it immediately") — confirm
	 * the callback thread and priority against the upstream source.
	 */
	vdo_set_completion_callback(completion, callback,
				    vdo->thread_config.bio_ack_thread);
	vdo_launch_completion_with_priority(completion, BIO_ACK_Q_ACK_PRIORITY);
}