/*
 * The array is implemented as a fully populated btree, which points to
 * blocks that contain the packed values.  This is more space efficient
 * than just using a btree since we don't store 1 key per value.
 */
struct array_block {
	__le32 csum;		/* Checksum of the block (see validator). */
	__le32 max_entries;	/* Capacity of this block, in values. */
	__le32 nr_entries;	/* Number of values currently held. */
	__le32 value_size;	/* Size in bytes of each packed value. */
	__le64 blocknr;		/* Block this node is supposed to live in. */
} __packed;
/*
 * Validator methods.  As usual we calculate a checksum, and also write the
 * block location into the header (paranoia about ssds remapping areas by
 * mistake).
 */
#define CSUM_XOR 595846735
/*
 * Functions for manipulating the array blocks.
 */
/* * Returns a pointer to a value within an array block. * * index - The index into _this_ specific block.
*/ staticvoid *element_at(struct dm_array_info *info, struct array_block *ab, unsignedint index)
{ unsignedchar *entry = (unsignedchar *) (ab + 1);
entry += index * info->value_type.size;
return entry;
}
/* * Utility function that calls one of the value_type methods on every value * in an array block.
*/ staticvoid on_entries(struct dm_array_info *info, struct array_block *ab, void (*fn)(void *, constvoid *, unsignedint))
{ unsignedint nr_entries = le32_to_cpu(ab->nr_entries);
/*
 * Increment every value in an array block.
 */
static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
{
	struct dm_btree_value_type *vt = &info->value_type;

	/* inc is optional in the value_type; skip if not provided. */
	if (vt->inc)
		on_entries(info, ab, vt->inc);
}
/*
 * Decrement every value in an array block.
 */
static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
{
	struct dm_btree_value_type *vt = &info->value_type;

	/* dec is optional in the value_type; skip if not provided. */
	if (vt->dec)
		on_entries(info, ab, vt->dec);
}
/*
 * Each array block can hold this many values.
 */
static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
{
	/* Space left after the header, divided into whole values. */
	return (size_of_block - sizeof(struct array_block)) / value_size;
}
/* * Allocate a new array block. The caller will need to unlock block.
*/ staticint alloc_ablock(struct dm_array_info *info, size_t size_of_block,
uint32_t max_entries, struct dm_block **block, struct array_block **ab)
{ int r;
r = dm_tm_new_block(info->btree_info.tm, &array_validator, block); if (r) return r;
/* * Pad an array block out with a particular value. Every instance will * cause an increment of the value_type. new_nr must always be more than * the current number of entries.
*/ staticvoid fill_ablock(struct dm_array_info *info, struct array_block *ab, constvoid *value, unsignedint new_nr)
{
uint32_t nr_entries, delta, i; struct dm_btree_value_type *vt = &info->value_type;
nr_entries = le32_to_cpu(ab->nr_entries);
delta = new_nr - nr_entries; if (vt->inc)
vt->inc(vt->context, value, delta); for (i = nr_entries; i < new_nr; i++)
memcpy(element_at(info, ab, i), value, vt->size);
ab->nr_entries = cpu_to_le32(new_nr);
}
/* * Remove some entries from the back of an array block. Every value * removed will be decremented. new_nr must be <= the current number of * entries.
*/ staticvoid trim_ablock(struct dm_array_info *info, struct array_block *ab, unsignedint new_nr)
{
uint32_t nr_entries, delta; struct dm_btree_value_type *vt = &info->value_type;
/*
 * Read locks a block, and coerces it to an array block.  The caller must
 * unlock 'block' when finished.
 */
static int get_ablock(struct dm_array_info *info, dm_block_t b,
		      struct dm_block **block, struct array_block **ab)
{
	int r;

	r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
	if (r)
		return r;

	/*
	 * NOTE(review): the tail of this function was truncated in this
	 * copy of the file; restored from the upstream dm-array
	 * implementation — confirm against the original source.
	 */
	*ab = dm_block_data(*block);

	return 0;
}
/*
 * Looks up an array block in the btree, and then read locks it.
 *
 * index is the index of the index of the array_block, (ie. the array index
 * / max_entries).
 */
static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
			 unsigned int index, struct dm_block **block,
			 struct array_block **ab)
{
	int r;
	uint64_t key = index;
	__le64 block_le;

	r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
	if (r)
		return r;

	/*
	 * NOTE(review): the tail of this function was truncated in this
	 * copy of the file; restored from the upstream dm-array
	 * implementation — confirm against the original source.
	 */
	return get_ablock(info, le64_to_cpu(block_le), block, ab);
}
/*
 * Shadows (copy-on-write) an array block.  The caller must unlock 'block'
 * when finished.
 */
static int __shadow_ablock(struct dm_array_info *info, dm_block_t b,
			   struct dm_block **block, struct array_block **ab)
{
	int inc;
	int r = dm_tm_shadow_block(info->btree_info.tm, b,
				   &array_validator, block, &inc);
	if (r)
		return r;

	*ab = dm_block_data(*block);

	/*
	 * If the shadow actually copied the data, the new copy holds a
	 * fresh reference to every value, so increment them all.
	 */
	if (inc)
		inc_ablock_entries(info, *ab);

	return 0;
}
/*
 * The shadow op will often be a noop.  Only insert if it really
 * copied data.
 */
static int __reinsert_ablock(struct dm_array_info *info, unsigned int index,
			     struct dm_block *block, dm_block_t b,
			     dm_block_t *root)
{
	int r = 0;

	if (dm_block_location(block) != b) {
		/*
		 * dm_tm_shadow_block will have already decremented the old
		 * block, but it is still referenced by the btree.  We
		 * increment to stop the insert decrementing it below zero
		 * when overwriting the old value.
		 */
		dm_tm_inc(info->btree_info.tm, b);
		r = insert_ablock(info, index, block, root);
	}

	return r;
}
/*
 * Looks up an array block in the btree.  Then shadows it, and updates the
 * btree to point to this new shadow.  'root' is an input/output parameter
 * for both the current root block, and the new one.
 */
static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
			 unsigned int index, struct dm_block **block,
			 struct array_block **ab)
{
	int r;
	uint64_t key = index;
	dm_block_t b;
	__le64 block_le;

	r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
	if (r)
		return r;
	b = le64_to_cpu(block_le);

	r = __shadow_ablock(info, b, block, ab);
	if (r)
		return r;

	/*
	 * NOTE(review): the tail of this function was truncated in this
	 * copy of the file; restored from the upstream dm-array
	 * implementation — confirm against the original source.
	 */
	return __reinsert_ablock(info, index, *block, b, root);
}
/*
 * Allocate a new array block, and fill it with some values.
 */
static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
			     uint32_t max_entries,
			     unsigned int block_index, uint32_t nr,
			     const void *value, dm_block_t *root)
{
	int r;
	struct dm_block *block;
	struct array_block *ab;

	r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
	if (r)
		return r;

	/*
	 * NOTE(review): the middle of this function and the head of
	 * insert_full_ablocks() below were truncated in this copy of the
	 * file; restored from the upstream dm-array implementation —
	 * confirm against the original source.
	 */
	fill_ablock(info, ab, value, nr);
	r = insert_ablock(info, block_index, block, root);
	unlock_ablock(info, block);

	return r;
}

/*
 * Insert a run of completely full array blocks [begin_block, end_block).
 */
static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
			       unsigned int begin_block,
			       unsigned int end_block,
			       unsigned int max_entries,
			       const void *value, dm_block_t *root)
{
	int r = 0;

	for (; !r && begin_block != end_block; begin_block++)
		r = insert_new_ablock(info, size_of_block, max_entries,
				      begin_block, max_entries, value, root);

	return r;
}
/*
 * There are a bunch of functions involved with resizing an array.  This
 * structure holds information that is commonly needed by them.  Purely here
 * to reduce parameter count.
 */
struct resize {
	/*
	 * Describes the array.
	 */
	struct dm_array_info *info;

	/*
	 * The current root of the array.  This gets updated.
	 */
	dm_block_t root;

	/*
	 * Metadata block size.  Used to calculate the nr entries in an
	 * array block.
	 */
	size_t size_of_block;

	/*
	 * Maximum nr entries in an array block.
	 */
	unsigned int max_entries;

	/*
	 * nr of completely full blocks in the array.
	 *
	 * 'old' refers to before the resize, 'new' after.
	 */
	unsigned int old_nr_full_blocks, new_nr_full_blocks;

	/*
	 * Number of entries in the final block.  0 iff only full blocks in
	 * the array.
	 */
	unsigned int old_nr_entries_in_last_block, new_nr_entries_in_last_block;

	/*
	 * The default value used when growing the array.
	 */
	const void *value;
};
/* * Removes a consecutive set of array blocks from the btree. The values * in block are decremented as a side effect of the btree remove. * * begin_index - the index of the first array block to remove. * end_index - the one-past-the-end value. ie. this block is not removed.
*/ staticint drop_blocks(struct resize *resize, unsignedint begin_index, unsignedint end_index)
{ int r;
while (begin_index != end_index) {
uint64_t key = begin_index++;
r = dm_btree_remove(&resize->info->btree_info, resize->root,
&key, &resize->root); if (r) return r;
}
return 0;
}
/*
 * Calculates how many blocks are needed for the array.
 */
static unsigned int total_nr_blocks_needed(unsigned int nr_full_blocks,
					   unsigned int nr_entries_in_last_block)
{
	/* A non-empty partial tail block needs one extra block. */
	return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
}
/*
 * NOTE(review): the lines below appear to be the interior of a
 * shrink()-style resize helper whose enclosing function signature was lost
 * in this copy of the file — 'resize', 'begin', 'end', 'r', 'block' and
 * 'ab' are not declared in the visible span.  Code left byte-identical;
 * only comments reformatted/added.  Confirm against the original source.
 */
/*
 * Lose some blocks from the back?
 */
if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
resize->new_nr_entries_in_last_block);
end = total_nr_blocks_needed(resize->old_nr_full_blocks,
resize->old_nr_entries_in_last_block);
r = drop_blocks(resize, begin, end); if (r) return r;
}
/*
 * Trim the new tail block
 */
if (resize->new_nr_entries_in_last_block) {
r = shadow_ablock(resize->info, &resize->root,
resize->new_nr_full_blocks, &block, &ab); if (r) return r;
/*
 * NOTE(review): the fragment is truncated here — presumably a
 * trim_ablock() call and an unlock follow in the original; verify.
 */
/* * These are the value_type functions for the btree elements, which point * to array blocks.
*/ staticvoid block_inc(void *context, constvoid *value, unsignedint count)
{ const __le64 *block_le = value; struct dm_array_info *info = context; unsignedint i;
for (i = 0; i < count; i++, block_le++)
dm_tm_inc(info->btree_info.tm, le64_to_cpu(*block_le));
}
/*
 * NOTE(review): the lines below appear to be the interior of block_dec()
 * (the decrement counterpart to block_inc above); the enclosing signature
 * and local declarations were lost in this copy of the file — 'value',
 * 'info', 'block_le', 'b', 'ref_count', 'r', 'block' and 'ab' are not
 * declared in the visible span, and the fused token '(unsignedlonglong)'
 * is reproduced as-is.  Code left byte-identical; only comments added.
 * Confirm against the original source.
 */
memcpy(&block_le, value, sizeof(block_le));
b = le64_to_cpu(block_le);
r = dm_tm_ref(info->btree_info.tm, b, &ref_count); if (r) {
DMERR_LIMIT("couldn't get reference count for block %llu",
(unsignedlonglong) b); return;
}
if (ref_count == 1) {
/*
 * We're about to drop the last reference to this ablock.
 * So we need to decrement the ref count of the contents.
 */
r = get_ablock(info, b, &block, &ab); if (r) {
DMERR_LIMIT("couldn't get array block %llu",
(unsignedlonglong) b); return;
}
/* NOTE(review): fragment truncated here. */
/*
 * NOTE(review): the trailing text below is not C source.  It is extraction
 * residue — a duplicated German website disclaimer, roughly: "The
 * information on this website was compiled carefully and to the best of
 * our knowledge; however, neither completeness, correctness, nor quality
 * of the provided information is guaranteed.  Note: the syntax
 * highlighting and the measurement are still experimental."  Commented out
 * so the file remains valid C; safe to delete entirely.
 *
 *   Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *   sorgfältig zusammengestellt.  Es wird jedoch weder Vollständigkeit,
 *   noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 *   zugesichert.
 *   Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 *   experimentell.
 */