// SPDX-License-Identifier: GPL-2.0+ /* * Maple Tree implementation * Copyright (c) 2018-2022 Oracle Corporation * Authors: Liam R. Howlett <Liam.Howlett@oracle.com> * Matthew Wilcox <willy@infradead.org> * Copyright (c) 2023 ByteDance * Author: Peng Zhang <zhangpeng.00@bytedance.com>
*/
/* * DOC: Interesting implementation details of the Maple Tree * * Each node type has a number of slots for entries and a number of slots for * pivots. In the case of dense nodes, the pivots are implied by the position * and are simply the slot index + the minimum of the node. * * In regular B-Tree terms, pivots are called keys. The term pivot is used to * indicate that the tree is specifying ranges. Pivots may appear in the * subtree with an entry attached to the value whereas keys are unique to a * specific position of a B-tree. Pivot values are inclusive of the slot with * the same index. * * * The following illustrates the layout of a range64 nodes slots and pivots. * * * Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 | * ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ ┬ * │ │ │ │ │ │ │ │ └─ Implied maximum * │ │ │ │ │ │ │ └─ Pivot 14 * │ │ │ │ │ │ └─ Pivot 13 * │ │ │ │ │ └─ Pivot 12 * │ │ │ │ └─ Pivot 11 * │ │ │ └─ Pivot 2 * │ │ └─ Pivot 1 * │ └─ Pivot 0 * └─ Implied minimum * * Slot contents: * Internal (non-leaf) nodes contain pointers to other nodes. * Leaf nodes contain entries. * * The location of interest is often referred to as an offset. All offsets have * a slot, but the last offset has an implied pivot from the node above (or * UINT_MAX for the root node. * * Ranges complicate certain write activities. When modifying any of * the B-tree variants, it is known that one entry will either be added or * deleted. When modifying the Maple Tree, one store operation may overwrite * the entire data set, or one half of the tree, or the middle half of the tree. *
*/
/*
 * Kernel pointer hashing renders much of the maple tree dump useless as tagged
 * pointers get hashed to arbitrary values.
 *
 * If CONFIG_DEBUG_VM_MAPLE_TREE is set we are in a debug mode where it is
 * permissible to bypass this. Otherwise remain cautious and retain the hashing.
 *
 * Userland doesn't know about %px so also use %p there.
 */
#if defined(__KERNEL__) && defined(CONFIG_DEBUG_VM_MAPLE_TREE)
#define PTR_FMT "%px"
#else
#define PTR_FMT "%p"
#endif
#define MA_ROOT_PARENT 1
/*
 * Maple state flags
 * MA_STATE_BULK		- Bulk insert mode
 * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4
/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way. Any walkers of the older tree will hit a
 * dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};
#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif
/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	/* A node still linked into the tree points at its parent, not itself */
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}
/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static __always_inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}
/* * mte_to_mat() - Convert a maple encoded node to a maple topiary node. * @entry: The maple encoded node * * Return: a maple topiary pointer
*/ staticinlinestruct maple_topiary *mte_to_mat(conststruct maple_enode *entry)
{ return (struct maple_topiary *)
((unsignedlong)entry & ~MAPLE_NODE_MASK);
}
/* * mas_mn() - Get the maple state node. * @mas: The maple state * * Return: the maple node (not encoded - bare pointer).
*/ staticinlinestruct maple_node *mas_mn(conststruct ma_state *mas)
{ return mte_to_node(mas->node);
}
/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 *
 * A node is marked dead by making its parent pointer point at itself.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}
/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04
/* * The Parent Pointer * Excluding root, the parent pointer is 256B aligned like all other tree nodes. * When storing a 32 or 64 bit values, the offset can fit into 5 bits. The 16 * bit values need an extra bit to store the offset. This extra bit comes from * a reuse of the last bit in the node type. This is possible by using bit 1 to * indicate if bit 2 is part of the type or the slot. * * Note types: * 0x??1 = Root * 0x?00 = 16 bit nodes * 0x010 = 32 bit nodes * 0x110 = 64 bit nodes * * Slot size and alignment * 0b??1 : Root * 0b?00 : 16 bit values, type in 0-1, slot in 2-7 * 0b010 : 32 bit values, type in 0-2, slot in 3-7 * 0b110 : 64 bit values, type in 0-2, slot in 3-7
*/
/* * mte_parent_shift() - Get the parent shift for the slot storage. * @parent: The parent pointer cast as an unsigned long * Return: The shift into that pointer to the star to of the slot
*/ staticinlineunsignedlong mte_parent_shift(unsignedlong parent)
{ /* Note bit 1 == 0 means 16B */ if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) return MAPLE_PARENT_SLOT_SHIFT;
return MAPLE_PARENT_16B_SLOT_SHIFT;
}
/* * mte_parent_slot_mask() - Get the slot mask for the parent. * @parent: The parent pointer cast as an unsigned long. * Return: The slot mask for that parent.
*/ staticinlineunsignedlong mte_parent_slot_mask(unsignedlong parent)
{ /* Note bit 1 == 0 means 16B */ if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) return MAPLE_PARENT_SLOT_MASK;
return MAPLE_PARENT_16B_SLOT_MASK;
}
/* * mas_parent_type() - Return the maple_type of the parent from the stored * parent type. * @mas: The maple state * @enode: The maple_enode to extract the parent's enum * Return: The node->parent maple_type
*/ staticinline enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{ unsignedlong p_type;
p_type = (unsignedlong)mte_to_node(enode)->parent; if (WARN_ON(p_type & MAPLE_PARENT_ROOT)) return 0;
p_type &= MAPLE_NODE_MASK;
p_type &= ~mte_parent_slot_mask(p_type); switch (p_type) { case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */ if (mt_is_alloc(mas->tree)) return maple_arange_64; return maple_range_64;
}
return 0;
}
/* * mas_set_parent() - Set the parent node and encode the slot * @mas: The maple state * @enode: The encoded maple node. * @parent: The encoded maple node that is the parent of @enode. * @slot: The slot that @enode resides in @parent. * * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the * parent type.
*/ staticinline void mas_set_parent(struct ma_state *mas, struct maple_enode *enode, conststruct maple_enode *parent, unsignedchar slot)
{ unsignedlong val = (unsignedlong)parent; unsignedlong shift; unsignedlong type; enum maple_type p_type = mte_node_type(parent);
switch (p_type) { case maple_range_64: case maple_arange_64:
shift = MAPLE_PARENT_SLOT_SHIFT;
type = MAPLE_PARENT_RANGE64; break; default: case maple_dense: case maple_leaf_64:
shift = type = 0; break;
}
val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
val |= (slot << shift) | type;
mte_to_node(enode)->parent = ma_parent_ptr(val);
}
/* * mte_parent_slot() - get the parent slot of @enode. * @enode: The encoded maple node. * * Return: The slot in the parent node where @enode resides.
*/ static __always_inline unsignedint mte_parent_slot(conststruct maple_enode *enode)
{ unsignedlong val = (unsignedlong)mte_to_node(enode)->parent;
if (unlikely(val & MA_ROOT_PARENT)) return 0;
/* * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
*/ return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}
/* * mte_parent() - Get the parent of @node. * @enode: The encoded maple node. * * Return: The parent maple node.
*/ static __always_inline struct maple_node *mte_parent(conststruct maple_enode *enode)
{ return (void *)((unsignedlong)
(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}
/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * A dead node has a parent pointer that points back at itself (see
 * mte_set_node_dead()).
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}
/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *node;

	node = mte_to_node(enode);
	/* NOTE(review): body was truncated in source; delegates to ma_dead_node — confirm */
	return ma_dead_node(node);
}
/* * mas_allocated() - Get the number of nodes allocated in a maple state. * @mas: The maple state * * The ma_state alloc member is overloaded to hold a pointer to the first * allocated node or to the number of requested nodes to allocate. If bit 0 is * set, then the alloc contains the number of requested nodes. If there is an * allocated node, then the total allocated nodes is in that node. * * Return: The total number of nodes allocated
*/ staticinlineunsignedlong mas_allocated(conststruct ma_state *mas)
{ if (!mas->alloc || ((unsignedlong)mas->alloc & 0x1)) return 0;
return mas->alloc->total;
}
/* * mas_set_alloc_req() - Set the requested number of allocations. * @mas: the maple state * @count: the number of allocations. * * The requested number of allocations is either in the first allocated node, * located in @mas->alloc->request_count, or directly in @mas->alloc if there is * no allocated node. Set the request either in the node or do the necessary * encoding to store in @mas->alloc directly.
*/ staticinlinevoid mas_set_alloc_req(struct ma_state *mas, unsignedlong count)
{ if (!mas->alloc || ((unsignedlong)mas->alloc & 0x1)) { if (!count)
mas->alloc = NULL; else
mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U); return;
}
mas->alloc->request_count = count;
}
/* * mas_alloc_req() - get the requested number of allocations. * @mas: The maple state * * The alloc count is either stored directly in @mas, or in * @mas->alloc->request_count if there is at least one node allocated. Decode * the request count if it's stored directly in @mas->alloc. * * Return: The allocation request count.
*/ staticinlineunsignedint mas_alloc_req(conststruct ma_state *mas)
{ if ((unsignedlong)mas->alloc & 0x1) return (unsignedlong)(mas->alloc) >> 1; elseif (mas->alloc) return mas->alloc->request_count; return 0;
}
/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
					   enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		/* Dense nodes have implied pivots (slot index + node min) */
		return NULL;
	}
	return NULL;
}
/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps, or %NULL for node types that do
 * not track gaps.
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}
/* * mas_safe_pivot() - get the pivot at @piv or mas->max. * @mas: The maple state * @pivots: The pointer to the maple node pivots * @piv: The pivot to fetch * @type: The maple node type * * Return: The pivot at @piv within the limit of the @pivots array, @mas->max * otherwise.
*/ static __always_inline unsignedlong
mas_safe_pivot(conststruct ma_state *mas, unsignedlong *pivots, unsignedchar piv, enum maple_type type)
{ if (piv >= mt_pivots[type]) return mas->max;
return pivots[piv];
}
/* * mas_safe_min() - Return the minimum for a given offset. * @mas: The maple state * @pivots: The pointer to the maple node pivots * @offset: The offset into the pivot array * * Return: The minimum range value that is contained in @offset.
*/ staticinlineunsignedlong
mas_safe_min(struct ma_state *mas, unsignedlong *pivots, unsignedchar offset)
{ if (likely(offset)) return pivots[offset - 1] + 1;
return mas->min;
}
/* * mte_set_pivot() - Set a pivot to a value in an encoded maple node. * @mn: The encoded maple node * @piv: The pivot offset * @val: The value of the pivot
*/ staticinlinevoid mte_set_pivot(struct maple_enode *mn, unsignedchar piv, unsignedlong val)
{ struct maple_node *node = mte_to_node(mn); enum maple_type type = mte_node_type(mn);
BUG_ON(piv >= mt_pivots[type]); switch (type) { case maple_range_64: case maple_leaf_64:
node->mr64.pivot[piv] = val; break; case maple_arange_64:
node->ma64.pivot[piv] = val; break; case maple_dense: break;
}
}
/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
	default:
		/* default avoids falling off the end on an invalid type */
		return mn->slot;
	}
}
/*
 * mt_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mt: The maple tree
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static __always_inline void *mt_slot_locked(struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static __always_inline void *mas_slot_locked(struct ma_state *mas,
		void __rcu **slots, unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}
/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
		unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}
/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static __always_inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}
/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}
/* * ma_set_meta() - Set the metadata information of a node. * @mn: The maple node * @mt: The maple node type * @offset: The offset of the highest sub-gap in this node. * @end: The end of the data in this node.
*/ staticinlinevoid ma_set_meta(struct maple_node *mn, enum maple_type mt, unsignedchar offset, unsignedchar end)
{ struct maple_metadata *meta = ma_meta(mn, mt);
meta->gap = offset;
meta->end = end;
}
/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				  enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		/*
		 * A set second-to-last pivot means the last slot may hold a
		 * node rather than metadata - check before clearing.
		 */
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}
/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: The last offset with data, as stored in the node metadata.
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}
/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 *
 * Return: The offset of the largest gap (only valid for arange_64 nodes).
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn)
{
	return mn->ma64.meta.gap;
}
/* * ma_set_meta_gap() - Set the largest gap location in a nodes metadata * @mn: The maple node * @mt: The maple node type * @offset: The location of the largest gap.
*/ staticinlinevoid ma_set_meta_gap(struct maple_node *mn, enum maple_type mt, unsignedchar offset)
{
struct maple_metadata *meta = ma_meta(mn, mt);
meta->gap = offset;
}
/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		/* First entry: list head and tail are the same node */
		mat->tail = mat->head = dead_enode;
		return;
	}

	/* NOTE(review): tail-append was truncated in source; reconstructed from the doc comment */
	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}
static void mt_free_walk(struct rcu_head *head);
static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
			    bool free);
/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;
	struct maple_node *node;
	bool in_rcu = mt_in_rcu(mas->tree);

	while (mat->head) {
		/* Fetch the next link before the node is destroyed */
		next = mte_to_mat(mat->head)->next;
		node = mte_to_node(mat->head);
		mt_destroy_walk(mat->head, mas->tree, !in_rcu);
		if (in_rcu)
			call_rcu(&node->rcu, mt_free_walk);
		mat->head = next;
	}
} /* * mas_descend() - Descend into the slot stored in the ma_state. * @mas: the maple state. * * Note: Not RCU safe, only use in write side or debug code.
*/ staticinlinevoid mas_descend(struct ma_state *mas)
{ enum maple_type type; unsignedlong *pivots; struct maple_node *node; void __rcu **slots;
/* * mte_set_gap() - Set a maple node gap. * @mn: The encoded maple node * @gap: The offset of the gap to set * @val: The gap value
*/ staticinlinevoid mte_set_gap(conststruct maple_enode *mn, unsignedchar gap, unsignedlong val)
{ switch (mte_node_type(mn)) { default: break; case maple_arange_64:
mte_to_node(mn)->ma64.gap[gap] = val; break;
}
}
/* * mas_ascend() - Walk up a level of the tree. * @mas: The maple state * * Sets the @mas->max and @mas->min for the parent node of mas->node. This * may cause several levels of walking up to find the correct min and max. * May find a dead node which will cause a premature return. * Return: 1 on dead node, 0 otherwise
*/ staticint mas_ascend(struct ma_state *mas)
{ struct maple_enode *p_enode; /* parent enode. */ struct maple_enode *a_enode; /* ancestor enode. */ struct maple_node *a_node; /* ancestor node. */ struct maple_node *p_node; /* parent node. */ unsignedchar a_slot; enum maple_type a_type; unsignedlong min, max; unsignedlong *pivots; bool set_max = false, set_min = false;
/* * !mas->offset implies that parent node min == mas->min. * mas->offset > 0 implies that we need to walk up to find the * implied pivot min.
*/ if (!mas->offset) {
min = mas->min;
set_min = true;
}
if (!set_min && a_slot) {
set_min = true;
min = pivots[a_slot - 1] + 1;
}
if (!set_max && a_slot < mt_pivots[a_type]) {
set_max = true;
max = pivots[a_slot];
}
if (unlikely(ma_dead_node(a_node))) return 1;
if (unlikely(ma_is_root(a_node))) break;
} while (!set_min || !set_max);
mas->max = max;
mas->min = min; return 0;
}
/* * mas_pop_node() - Get a previously allocated maple node from the maple state. * @mas: The maple state * * Return: A pointer to a maple node.
*/ staticinlinestruct maple_node *mas_pop_node(struct ma_state *mas)
{ struct maple_alloc *ret, *node = mas->alloc; unsignedlong total = mas_allocated(mas); unsignedint req = mas_alloc_req(mas);
/* nothing or a request pending. */ if (WARN_ON(!total)) return NULL;
if (total == 1) { /* single allocation in this ma_state */
mas->alloc = NULL;
ret = node; goto single_node;
}
if (node->node_count == 1) { /* Single allocation in this node. */
mas->alloc = node->slot[0];
mas->alloc->total = node->total - 1;
ret = node; goto new_head;
}
node->total--;
ret = node->slot[--node->node_count];
node->slot[node->node_count] = NULL;
single_node:
new_head: if (req) {
req++;
mas_set_alloc_req(mas, req);
}
/* * mas_push_node() - Push a node back on the maple state allocation. * @mas: The maple state * @used: The used maple node * * Stores the maple node back into @mas->alloc for reuse. Updates allocated and * requested node count as necessary.
*/ staticinlinevoid mas_push_node(struct ma_state *mas, struct maple_node *used)
{ struct maple_alloc *reuse = (struct maple_alloc *)used; struct maple_alloc *head = mas->alloc; unsignedlong count; unsignedint requested = mas_alloc_req(mas);
/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}
/* * mas_node_count_gfp() - Check if enough nodes are allocated and request more * if there is not enough nodes. * @mas: The maple state * @count: The number of nodes needed * @gfp: the gfp flags
*/ staticvoid mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{ unsignedlong allocated = mas_allocated(mas);
/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there is not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	/* No "return" of a void expression - ISO C constraint violation */
	mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}
/* * mas_start() - Sets up maple state for operations. * @mas: The maple state. * * If mas->status == ma_start, then set the min, max and depth to * defaults. * * Return: * - If mas->node is an error or not mas_start, return NULL. * - If it's an empty tree: NULL & mas->status == ma_none * - If it's a single entry: The entry & mas->status == ma_root * - If it's a tree: NULL & mas->status == ma_active
*/ staticinlinestruct maple_enode *mas_start(struct ma_state *mas)
{ if (likely(mas_is_start(mas))) { struct maple_enode *root;
mas->min = 0;
mas->max = ULONG_MAX;
retry:
mas->depth = 0;
root = mas_root(mas); /* Tree with nodes */ if (likely(xa_is_node(root))) {
mas->depth = 0;
mas->status = ma_active;
mas->node = mte_safe_root(root);
mas->offset = 0; if (mte_dead_node(mas->node)) goto retry;
return NULL;
}
mas->node = NULL; /* empty tree */ if (unlikely(!root)) {
mas->status = ma_none;
mas->offset = MAPLE_NODE_SLOTS; return NULL;
}
/* Single entry tree */
mas->status = ma_root;
mas->offset = MAPLE_NODE_SLOTS;
/* Single entry tree. */ if (mas->index > 0) return NULL;
return root;
}
return NULL;
}
/* * ma_data_end() - Find the end of the data in a node. * @node: The maple node * @type: The maple node type * @pivots: The array of pivots in the node * @max: The maximum value in the node * * Uses metadata to find the end of the data when possible. * Return: The zero indexed last slot with data (may be null).
*/ static __always_inline unsignedchar ma_data_end(struct maple_node *node, enum maple_type type, unsignedlong *pivots, unsignedlong max)
{ unsignedchar offset;
if (!pivots) return 0;
if (type == maple_arange_64) return ma_meta_end(node, type);
offset = mt_pivots[type] - 1; if (likely(!pivots[offset])) return ma_meta_end(node, type);
if (likely(pivots[offset] == max)) return offset;
return mt_pivots[type];
}
/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}
/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		/* Dense: count consecutive empty slots */
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Check the first implied pivot optimizes the loop below and slot 1 may
	 * be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;

		if (max_gap > pivots[max_piv] - mas->min)
			return max_gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}
/* * ma_max_gap() - Get the maximum gap in a maple node (non-leaf) * @node: The maple node * @gaps: The pointer to the gaps * @mt: The maple node type * @off: Pointer to store the offset location of the gap. * * Uses the metadata data end to scan backwards across set gaps. * * Return: The maximum gap value
*/ staticinlineunsignedlong
ma_max_gap(struct maple_node *node, unsignedlong *gaps, enum maple_type mt, unsignedchar *off)
{ unsignedchar offset, i; unsignedlong max_gap = 0;
i = offset = ma_meta_end(node, mt); do { if (gaps[i] > max_gap) {
max_gap = gaps[i];
offset = i;
}
} while (i--);
*off = offset; return max_gap;
}
/* * mas_max_gap() - find the largest gap in a non-leaf node and set the slot. * @mas: The maple state. * * Return: The gap value.
*/ staticinlineunsignedlong mas_max_gap(struct ma_state *mas)
{ unsignedlong *gaps; unsignedchar offset; enum maple_type mt; struct maple_node *node;
mt = mte_node_type(mas->node); if (ma_is_leaf(mt)) return mas_leaf_max_gap(mas);
/* * mas_parent_gap() - Set the parent gap and any gaps above, as needed * @mas: The maple state * @offset: The gap offset in the parent to set * @new: The new gap value. * * Set the parent gap then continue to set the gap upwards, using the metadata * of the parent to see if it is necessary to check the node above.
*/ staticinlinevoid mas_parent_gap(struct ma_state *mas, unsignedchar offset, unsignedlongnew)
{ unsignedlong meta_gap = 0; struct maple_node *pnode; struct maple_enode *penode; unsignedlong *pgaps; unsignedchar meta_offset; enum maple_type pmt;
if (offset != meta_offset) { if (meta_gap > new) return;
ma_set_meta_gap(pnode, pmt, offset);
} elseif (new < meta_gap) { new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
ma_set_meta_gap(pnode, pmt, meta_offset);
}
if (ma_is_root(pnode)) return;
/* Go to the parent node. */
pnode = mte_parent(penode);
pmt = mas_parent_type(mas, penode);
pgaps = ma_gaps(pnode, pmt);
offset = mte_parent_slot(penode);
penode = mt_mk_node(pnode, pmt); goto ascend;
}
/* * mas_update_gap() - Update a nodes gaps and propagate up if necessary. * @mas: the maple state.
*/ staticinlinevoid mas_update_gap(struct ma_state *mas)
{ unsignedchar pslot; unsignedlong p_gap; unsignedlong max_gap;
if (p_gap != max_gap)
mas_parent_gap(mas, pslot, max_gap);
}
/* * mas_adopt_children() - Set the parent pointer of all nodes in @parent to * @parent with the slot encoded. * @mas: the maple state (for the tree) * @parent: the maple encoded node containing the children.
*/ staticinlinevoid mas_adopt_children(struct ma_state *mas, struct maple_enode *parent)
{ enum maple_type type = mte_node_type(parent); struct maple_node *node = mte_to_node(parent); void __rcu **slots = ma_slots(node, type); unsignedlong *pivots = ma_pivots(node, type); struct maple_enode *child; unsignedchar offset;
/* * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old * node as dead. * @mas: the maple state with the new node * @old_enode: The old maple encoded node to replace. * @new_height: if we are inserting a root node, update the height of the tree
*/ staticinlinevoid mas_put_in_tree(struct ma_state *mas, struct maple_enode *old_enode, char new_height)
__must_hold(mas->tree->ma_lock)
{ unsignedchar offset; void __rcu **slots;
/* * mas_replace_node() - Replace a node by putting it in the tree, marking it * dead, and freeing it. * the parent encoding to locate the maple node in the tree. * @mas: the ma_state with @mas->node pointing to the new node. * @old_enode: The old maple encoded node. * @new_height: The new height of the tree as a result of the operation
*/ staticinlinevoid mas_replace_node(struct ma_state *mas, struct maple_enode *old_enode, unsignedchar new_height)
__must_hold(mas->tree->ma_lock)
{
mas_put_in_tree(mas, old_enode, new_height);
mas_free(mas, old_enode);
}
/* * mas_find_child() - Find a child who has the parent @mas->node. * @mas: the maple state with the parent. * @child: the maple state to store the child.
*/ staticinlinebool mas_find_child(struct ma_state *mas, struct ma_state *child)
__must_hold(mas->tree->ma_lock)
{ enum maple_type mt; unsignedchar offset; unsignedchar end; unsignedlong *pivots; struct maple_enode *entry; struct maple_node *node; void __rcu **slots;
/* * mab_shift_right() - Shift the data in mab right. Note, does not clean out the * old data or set b_node->b_end. * @b_node: the maple_big_node * @shift: the shift count
*/ staticinlinevoid mab_shift_right(struct maple_big_node *b_node, unsignedchar shift)
{ unsignedlong size = b_node->b_end * sizeof(unsignedlong);
/* * mab_middle_node() - Check if a middle node is needed (unlikely) * @b_node: the maple_big_node that contains the data. * @split: the potential split location * @slot_count: the size that can be stored in a single node being considered. * * Return: true if a middle node is required.
*/ staticinlinebool mab_middle_node(struct maple_big_node *b_node, int split, unsignedchar slot_count)
{ unsignedchar size = b_node->b_end;
/* NOTE(review): truncated -- the size/split comparison and return statement */
/* are missing. Restore from the upstream source.                            */
/* * mab_no_null_split() - ensure the split doesn't fall on a NULL * @b_node: the maple_big_node with the data * @split: the suggested split location * @slot_count: the number of slots in the node being considered. * * Return: the split location.
*/ staticinlineint mab_no_null_split(struct maple_big_node *b_node, unsignedchar split, unsignedchar slot_count)
{ if (!b_node->slot[split]) { /* * If the split is less than the max slot && the right side will * still be sufficient, then increment the split on NULL.
*/ if ((split < slot_count - 1) &&
(b_node->b_end - split) > (mt_min_slots[b_node->type]))
split++; else
split--;
} return split;
}
/* * mab_calc_split() - Calculate the split location and if there needs to be two * splits. * @mas: The maple state * @bn: The maple_big_node with the data * @mid_split: The second split, if required. 0 otherwise. * * Return: The first split location. The middle split is set in @mid_split.
*/ staticinlineint mab_calc_split(struct ma_state *mas, struct maple_big_node *bn, unsignedchar *mid_split)
{ unsignedchar b_end = bn->b_end; int split = b_end / 2; /* Assume equal split. */ unsignedchar slot_count = mt_slots[bn->type];
/* * To support gap tracking, all NULL entries are kept together and a node cannot * end on a NULL entry, with the exception of the left-most leaf. The * limitation means that the split of a node must be checked for this condition * and be able to put more data in one direction or the other.
*/ if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
*mid_split = 0;
split = b_end - mt_min_slots[bn->type];
if (!ma_is_leaf(bn->type)) return split;
mas->mas_flags |= MA_STATE_REBALANCE; if (!bn->slot[split])
split--; return split;
}
/* * Although extremely rare, it is possible to enter what is known as the 3-way * split scenario. The 3-way split comes about by means of a store of a range * that overwrites the end and beginning of two full nodes. The result is a set * of entries that cannot be stored in 2 nodes. Sometimes, these two nodes can * also be located in different parent nodes which are also full. This can * carry upwards all the way to the root in the worst case.
*/ if (unlikely(mab_middle_node(bn, split, slot_count))) {
split = b_end / 3;
*mid_split = split * 2;
} else {
*mid_split = 0;
}
/* Avoid ending a node on a NULL entry */
split = mab_no_null_split(bn, split, slot_count);
if (unlikely(*mid_split))
*mid_split = mab_no_null_split(bn, *mid_split, slot_count);
return split;
}
/* * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node * and set @b_node->b_end to the next free slot. * @mas: The maple state * @mas_start: The starting slot to copy * @mas_end: The end slot to copy (inclusively) * @b_node: The maple_big_node to place the data * @mab_start: The starting location in maple_big_node to store the data.
*/ staticinlinevoid mas_mab_cp(struct ma_state *mas, unsignedchar mas_start, unsignedchar mas_end, struct maple_big_node *b_node, unsignedchar mab_start)
{ enum maple_type mt; struct maple_node *node; void __rcu **slots; unsignedlong *pivots, *gaps; int i = mas_start, j = mab_start; unsignedchar piv_end;
/* NOTE(review): truncated -- only the declaration and locals survive; the */
/* copy loops are missing. Restore from the upstream source.               */
/* * mas_leaf_set_meta() - Set the metadata of a leaf if possible. * @node: The maple node * @mt: The maple type * @end: The node end
*/ staticinlinevoid mas_leaf_set_meta(struct maple_node *node, enum maple_type mt, unsignedchar end)
{ if (end < mt_slots[mt] - 1)
ma_set_meta(node, mt, 0, end);
}
/* * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node. * @b_node: the maple_big_node that has the data * @mab_start: the start location in @b_node. * @mab_end: The end location in @b_node (inclusively) * @mas: The maple state with the maple encoded node.
*/ staticinlinevoid mab_mas_cp(struct maple_big_node *b_node, unsignedchar mab_start, unsignedchar mab_end, struct ma_state *mas, bool new_max)
{ int i, j = 0; enum maple_type mt = mte_node_type(mas->node); struct maple_node *node = mte_to_node(mas->node); void __rcu **slots = ma_slots(node, mt); unsignedlong *pivots = ma_pivots(node, mt); unsignedlong *gaps = NULL; unsignedchar end;
if (mab_end - mab_start > mt_pivots[mt])
mab_end--;
if (!pivots[mt_pivots[mt] - 1])
slots[mt_pivots[mt]] = NULL;
i = mab_start; do {
pivots[j++] = b_node->pivot[i++];
} while (i <= mab_end && likely(b_node->pivot[i]));
memcpy(slots, b_node->slot + mab_start, sizeof(void *) * (i - mab_start));
if (new_max)
mas->max = b_node->pivot[i - 1];
end = j - 1; if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) { unsignedlong max_gap = 0; unsignedchar offset = 0;
gaps = ma_gaps(node, mt); do {
gaps[--j] = b_node->gap[--i]; if (gaps[j] > max_gap) {
offset = j;
max_gap = gaps[j];
}
} while (j);
/* NOTE(review): truncated -- the tail of mab_mas_cp() (closing of the gap */
/* branch and the metadata/end handling) is missing from this extraction.  */
/* * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert. * @mas: The maple state * @end: The maple node end * @mt: The maple node type
*/ staticinlinevoid mas_bulk_rebalance(struct ma_state *mas, unsignedchar end, enum maple_type mt)
{ if (!(mas->mas_flags & MA_STATE_BULK)) return;
/* NOTE(review): truncated -- everything after the early return is missing. */
/* * mas_store_b_node() - Store an @entry into the b_node while also copying the * data from a maple encoded node. * @wr_mas: the maple write state * @b_node: the maple_big_node to fill with data * @offset_end: the offset to end copying * * Return: The actual end of the data stored in @b_node
*/ static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, struct maple_big_node *b_node, unsignedchar offset_end)
{ unsignedchar slot; unsignedchar b_end; /* Possible underflow of piv will wrap back to 0 before use. */ unsignedlong piv; struct ma_state *mas = wr_mas->mas;
/* NOTE(review): the opening copy section that initializes slot, b_end and  */
/* piv is missing here, and brace nesting below is unbalanced -- this block */
/* was damaged by the extraction. Restore from the upstream source.         */
if (piv + 1 < mas->index) { /* Handle range starting after old range */
b_node->slot[b_end] = wr_mas->content; if (!wr_mas->content)
b_node->gap[b_end] = mas->index - 1 - piv;
b_node->pivot[b_end++] = mas->index - 1;
}
/* Store the new entry. */
mas->offset = b_end;
b_node->slot[b_end] = wr_mas->entry;
b_node->pivot[b_end] = mas->last;
/* Appended. */ if (mas->last >= mas->max) goto b_end;
/* Handle new range ending before old range ends */
piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type); if (piv > mas->last) { if (piv == ULONG_MAX)
mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
if (offset_end != slot)
wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
offset_end);
/* Copy end data to the end of the node. */
mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
b_node->b_end--; return;
b_end:
b_node->b_end = b_end;
}
/* * mas_prev_sibling() - Find the previous node with the same parent. * @mas: the maple state * * Return: True if there is a previous sibling, false otherwise.
*/ staticinlinebool mas_prev_sibling(struct ma_state *mas)
{ unsignedint p_slot = mte_parent_slot(mas->node);
/* For root node, p_slot is set to 0 by mte_parent_slot(). */ if (!p_slot) returnfalse;
/* NOTE(review): truncated -- the ascend/descend to the previous sibling is */
/* missing after the root check. Restore from the upstream source.          */
/* * mas_next_sibling() - Find the next node with the same parent. * @mas: the maple state * * Return: true if there is a next sibling, false otherwise.
*/ staticinlinebool mas_next_sibling(struct ma_state *mas)
{
MA_STATE(parent, mas->tree, mas->index, mas->last);
/* NOTE(review): truncated -- only the parent state declaration survives. */
/*
 * mas_node_or_none() - Set the enode and state.
 * @mas: the maple state
 * @enode: The encoded maple node.
 *
 * Set the node to the enode and the status to active if @enode is non-NULL,
 * otherwise clear the node and mark the state as none.
 */
static inline void mas_node_or_none(struct ma_state *mas,
		struct maple_enode *enode)
{
	if (enode) {
		mas->node = enode;
		mas->status = ma_active;
	} else {
		mas->node = NULL;
		mas->status = ma_none;
	}
}
/* * mas_wr_node_walk() - Find the correct offset for the index in the @mas. * If @mas->index cannot be found within the containing * node, we traverse to the last entry in the node. * @wr_mas: The maple write state * * Uses mas_slot_locked() and does not need to worry about dead nodes.
*/ staticinlinevoid mas_wr_node_walk(struct ma_wr_state *wr_mas)
{ struct ma_state *mas = wr_mas->mas; unsignedchar count, offset;
/* NOTE(review): truncated -- the pivot walk is missing after the locals. */
/* * mast_rebalance_next() - Rebalance against the next node * @mast: The maple subtree state
*/ staticinlinevoid mast_rebalance_next(struct maple_subtree_state *mast)
{ unsignedchar b_end = mast->bn->b_end;
/* NOTE(review): truncated -- the copy from the next node is missing. */
/* * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring * the node to the right. Checking the nodes to the right then the left at each * level upwards until root is reached. * Data is copied into the @mast->bn. * @mast: The maple_subtree_state.
*/ staticinline bool mast_spanning_rebalance(struct maple_subtree_state *mast)
{ struct ma_state r_tmp = *mast->orig_r; struct ma_state l_tmp = *mast->orig_l; unsignedchar depth = 0;
do {
mas_ascend(mast->orig_r);
mas_ascend(mast->orig_l);
depth++; if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
mast->orig_r->offset++; do {
mas_descend(mast->orig_r);
mast->orig_r->offset = 0;
} while (--depth);
/* NOTE(review): truncated -- the rebalance/copy calls, the left-side branch, */
/* the outer loop termination and the return are missing from this extract.  */
/*
 * mast_ascend() - Ascend the original left and right maple states.
 * @mast: the maple subtree state.
 *
 * Ascend the original left and right sides.  Set the offsets to point to the
 * data already in the new tree (@mast->l and @mast->r).
 */
static inline void mast_ascend(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_r, NULL);

	mas_ascend(mast->orig_l);
	mas_ascend(mast->orig_r);

	/* Set up the right side of things */
	mast->orig_r->offset = 0;
	mast->orig_r->index = mast->r->max;
	/* last should be larger than or equal to index */
	if (mast->orig_r->last < mast->orig_r->index)
		mast->orig_r->last = mast->orig_r->index;

	wr_mas.type = mte_node_type(mast->orig_r->node);
	mas_wr_node_walk(&wr_mas);

	/* Set up the left side of things */
	mast->orig_l->offset = 0;
	mast->orig_l->index = mast->l->min;
	wr_mas.mas = mast->orig_l;
	wr_mas.type = mte_node_type(mast->orig_l->node);
	mas_wr_node_walk(&wr_mas);

	mast->bn->type = wr_mas.type;
}
/* * mas_new_ma_node() - Create and return a new maple node. Helper function. * @mas: the maple state with the allocations. * @b_node: the maple_big_node with the type encoding. * * Use the node type from the maple_big_node to allocate a new node from the * ma_state. This function exists mainly for code readability. * * Return: A new maple encoded node
*/ staticinlinestruct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{ return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}
/* * mas_mab_to_node() - Set up right and middle nodes * * @mas: the maple state that contains the allocations. * @b_node: the node which contains the data. * @left: The pointer which will have the left node * @right: The pointer which may have the right node * @middle: the pointer which may have the middle node (rare) * @mid_split: the split location for the middle node * * Return: the split of left.
*/ staticinlineunsignedchar mas_mab_to_node(struct ma_state *mas, struct maple_big_node *b_node, struct maple_enode **left, struct maple_enode **right, struct maple_enode **middle, unsignedchar *mid_split)
{ unsignedchar split = 0; unsignedchar slot_count = mt_slots[b_node->type];
/* NOTE(review): truncated -- the allocation of @left/@right and the split */
/* computation are missing; only the @middle handling survives below.      */
if (*mid_split)
*middle = mas_new_ma_node(mas, b_node);
return split;
}
/* * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end * pointer. * @b_node: the big node to add the entry * @mas: the maple state to get the pivot (mas->max) * @entry: the entry to add, if NULL nothing happens.
*/ staticinlinevoid mab_set_b_end(struct maple_big_node *b_node, struct ma_state *mas, void *entry)
{ if (!entry) return;
/* NOTE(review): truncated -- the slot/pivot/gap store after the guard is missing. */
/* * mas_set_split_parent() - combine_then_separate helper function. Sets the parent * of @mas->node to either @left or @right, depending on @slot and @split * * @mas: the maple state with the node that needs a parent * @left: possible parent 1 * @right: possible parent 2 * @slot: the slot the mas->node was placed * @split: the split location between @left and @right
*/ staticinlinevoid mas_set_split_parent(struct ma_state *mas, struct maple_enode *left, struct maple_enode *right, unsignedchar *slot, unsignedchar split)
{ if (mas_is_none(mas)) return;
/* NOTE(review): truncated -- the parent assignment after the guard is missing. */
/*
 * mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to left encoded maple node.
 * @r: Pointer to right encoded maple node.
 * @right: The encoded maple node that takes over as the right side.
 * @slot: The offset
 * @split: The split location.
 * @mid_split: The middle split.
 */
static inline void mte_mid_split_check(struct maple_enode **l,
		struct maple_enode **r, struct maple_enode *right,
		unsigned char slot, unsigned char *split,
		unsigned char mid_split)
{
	/* Already switched over to the right-most node. */
	if (*r == right)
		return;

	/* Still left of the middle split; nothing to do. */
	if (slot < mid_split)
		return;

	/* Passed the mid-split: shift the window to the right. */
	*l = *r;
	*r = right;
	*split = mid_split;
}
/* * mast_set_split_parents() - Helper function to set three nodes parents. Slot * is taken from @mast->l. * @mast: the maple subtree state * @left: the left node * @right: the right node * @split: the split location.
*/ staticinlinevoid mast_set_split_parents(struct maple_subtree_state *mast, struct maple_enode *left, struct maple_enode *middle, struct maple_enode *right, unsignedchar split, unsignedchar mid_split)
{ unsignedchar slot; struct maple_enode *l = left; struct maple_enode *r = right;
/* NOTE(review): truncated -- only the declaration and locals survive. */
/* * mas_topiary_node() - Dispose of a single node * @mas: The maple state for pushing nodes * @in_rcu: If the tree is in rcu mode * * The node will either be RCU freed or pushed back on the maple state.
*/ staticinlinevoid mas_topiary_node(struct ma_state *mas, struct ma_state *tmp_mas, bool in_rcu)
{ struct maple_node *tmp; struct maple_enode *enode;
/* NOTE(review): truncated -- the free/push logic is missing after the locals. */
The information on this web page has been compiled carefully and to the best
of our knowledge.  However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The coloured syntax rendering and the measurement are still experimental.