/*
 * Generic key, ptr and record wrapper structures.
 *
 * These are disk format structures, and are converted where necessary
 * by the btree specific code that needs to interpret them.
 */
union xfs_btree_ptr {
	__be32		s;	/* short form ptr */
	__be64		l;	/* long form ptr */
};
/*
 * The in-core btree key.  Overlapping btrees actually store two keys
 * per pointer, so we reserve enough memory to hold both.  The __*bigkey
 * items should never be accessed directly.
 */
union xfs_btree_key {
	struct xfs_bmbt_key	bmbt;
	xfs_bmdr_key_t		bmbr;	/* bmbt root block */
	xfs_alloc_key_t		alloc;
	struct xfs_inobt_key	inobt;
	struct xfs_rmap_key	rmap;
	struct xfs_rmap_key	__rmap_bigkey[2];
	struct xfs_refcount_key	refc;
};
/*
 * This nonsense is to make -wlint happy.
 */
#define	XFS_LOOKUP_EQ	((xfs_lookup_t)XFS_LOOKUP_EQi)
#define	XFS_LOOKUP_LE	((xfs_lookup_t)XFS_LOOKUP_LEi)
#define	XFS_LOOKUP_GE	((xfs_lookup_t)XFS_LOOKUP_GEi)
/* * Decide if these two numeric btree key fields are contiguous, overlapping, * or if there's a gap between them. @x should be the field from the high * key and @y should be the field from the low key.
*/ staticinlineenum xbtree_key_contig xbtree_key_contig(uint64_t x, uint64_t y)
{
x++; if (x < y) return XBTREE_KEY_GAP; if (x == y) return XBTREE_KEY_CONTIGUOUS; return XBTREE_KEY_OVERLAP;
}
/* NOTE(review): interior of the btree ops vtable (presumably struct
 * xfs_btree_ops -- see bc_ops in the cursor below); the struct's opening
 * brace lies outside this chunk. */
	/*
	 * Compare key value and cursor value -- positive if key > cur,
	 * negative if key < cur, and zero if equal.
	 */
	int	(*cmp_key_with_cur)(struct xfs_btree_cur *cur,
				    const union xfs_btree_key *key);

	/*
	 * Compare key1 and key2 -- positive if key1 > key2, negative if
	 * key1 < key2, and zero if equal.  If the @mask parameter is non NULL,
	 * each key field to be used in the comparison must contain a nonzero
	 * value.
	 */
	int	(*cmp_two_keys)(struct xfs_btree_cur *cur,
				const union xfs_btree_key *key1,
				const union xfs_btree_key *key2,
				const union xfs_btree_key *mask);

	const struct xfs_buf_ops	*buf_ops;

	/* check that k1 is lower than k2 */
	int	(*keys_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_key *k1,
				const union xfs_btree_key *k2);

	/* check that r1 is lower than r2 */
	int	(*recs_inorder)(struct xfs_btree_cur *cur,
				const union xfs_btree_rec *r1,
				const union xfs_btree_rec *r2);

	/*
	 * Are these two btree keys immediately adjacent?
	 *
	 * Given two btree keys @key1 and @key2, decide if it is impossible for
	 * there to be a third btree key K satisfying the relationship
	 * @key1 < K < @key2.  To determine if two btree records are
	 * immediately adjacent, @key1 should be the high key of the first
	 * record and @key2 should be the low key of the second record.
	 * If the @mask parameter is non NULL, each key field to be used in the
	 * comparison must contain a nonzero value.
	 */
	enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur,
				const union xfs_btree_key *key1,
				const union xfs_btree_key *key2,
				const union xfs_btree_key *mask);

	/*
	 * Reallocate the space for if_broot to fit the number of records.
	 * Move the records and pointers in if_broot to fit the new size.  When
	 * shrinking this will eliminate holes between the records and pointers
	 * created by the caller.  When growing this will create holes to be
	 * filled in by the caller.
	 *
	 * The caller must not request to add more records than would fit in
	 * the on-disk inode root.  If the if_broot is currently NULL, then if
	 * we are adding records, one will be allocated.  The caller must also
	 * not request that the number of records go below zero, although it
	 * can go to zero.
	 */
	struct xfs_btree_block *(*broot_realloc)(struct xfs_btree_cur *cur,
				unsigned int new_numrecs);
};
/* btree geometry flags */
#define XFS_BTGEO_OVERLAPPING	(1U << 0)	/* overlapping intervals */
#define XFS_BTGEO_IROOT_RECORDS	(1U << 1)	/* iroot can store records */
/* NOTE(review): tail of an enclosing struct whose opening lies outside this
 * chunk -- judging by the sibling-readahead flags, the per-level btree
 * cursor state; confirm against the original source. */
	/* readahead info */
#define XFS_BTCUR_LEFTRA	(1 << 0) /* left sibling has been read-ahead */
#define XFS_BTCUR_RIGHTRA	(1 << 1) /* right sibling has been read-ahead */
	uint16_t	ra;
};
/* * Btree cursor structure. * This collects all information needed by the btree code in one place.
*/ struct xfs_btree_cur
{ struct xfs_trans *bc_tp; /* transaction we're in, if any */ struct xfs_mount *bc_mp; /* file system mount struct */ conststruct xfs_btree_ops *bc_ops; struct kmem_cache *bc_cache; /* cursor cache */ unsignedint bc_flags; /* btree features - below */ union xfs_btree_irec bc_rec; /* current insert/search record value */
uint8_t bc_nlevels; /* number of levels in the tree */
uint8_t bc_maxlevels; /* maximum levels for this btree type */ struct xfs_group *bc_group;
/* per-type information */ union { struct { struct xfs_inode *ip; short forksize; char whichfork; struct xbtree_ifakeroot *ifake; /* for staging cursor */
} bc_ino; struct { struct xfs_buf *agbp; struct xbtree_afakeroot *afake; /* for staging cursor */
} bc_ag; struct { struct xfbtree *xfbtree;
} bc_mem;
};
/* per-format private data */ union { struct { int allocated;
} bc_bmap; /* bmapbt */ struct { unsignedint nr_ops; /* # record updates */ unsignedint shape_changes; /* # of extent splits */
} bc_refc; /* refcountbt/rtrefcountbt */
};
/* Must be at the end of the struct! */ struct xfs_btree_level bc_levels[];
};
/* * Compute the size of a btree cursor that can handle a btree of a given * height. The bc_levels array handles node and leaf blocks, so its size * is exactly nlevels.
*/ staticinline size_t
xfs_btree_cur_sizeof(unsignedint nlevels)
{ return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels);
}
/* cursor state flags */
/*
 * The root of this btree is a fakeroot structure so that we can stage a btree
 * rebuild without leaving it accessible via primary metadata.  The ops struct
 * is dynamically allocated and must be freed when the cursor is deleted.
 */
#define XFS_BTREE_STAGING		(1U << 0)

/* We are converting a delalloc reservation (only for bmbt btrees) */
#define XFS_BTREE_BMBT_WASDEL		(1U << 1)

/* For extent swap, ignore owner check in verifier (only for bmbt btrees) */
#define XFS_BTREE_BMBT_INVALID_OWNER	(1U << 2)

/* Cursor is active (only for allocbt btrees) */
#define XFS_BTREE_ALLOCBT_ACTIVE	(1U << 3)
/*
 * Convert from buffer to btree block header.
 */
#define XFS_BUF_TO_BLOCK(bp)	((struct xfs_btree_block *)((bp)->b_addr))
/* Low-level block/pointer validators; see xfs_btree_check_block below. */
xfs_failaddr_t __xfs_btree_check_block(struct xfs_btree_cur *cur,
		struct xfs_btree_block *block, int level, struct xfs_buf *bp);
int __xfs_btree_check_ptr(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *ptr, int index, int level);
/*
 * Check that block header is ok.
 */
int
xfs_btree_check_block(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	struct xfs_btree_block	*block,	/* generic btree block pointer */
	int			level,	/* level of the btree block */
	struct xfs_buf		*bp);	/* buffer containing block, if any */
/*
 * Delete the btree cursor.
 */
void
xfs_btree_del_cursor(
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			error);	/* del because of error */
/*
 * Duplicate the btree cursor.
 * Allocate a new one, copy the record, re-get the buffers.
 */
int					/* error */
xfs_btree_dup_cursor(
	struct xfs_btree_cur	*cur,	/* input cursor */
	struct xfs_btree_cur	**ncur);/* output cursor */
/*
 * Compute first and last byte offsets for the fields given.
 * Interprets the offsets table, which contains struct field offsets.
 */
void
xfs_btree_offsets(
	uint32_t		fields,	/* bitmask of fields */
	const short		*offsets, /* table of field offsets */
	int			nbits,	/* number of bits to inspect */
	int			*first,	/* output: first byte offset */
	int			*last);	/* output: last byte offset */
/*
 * Common btree core entry points.
 */
int xfs_btree_increment(struct xfs_btree_cur *, int, int *);
int xfs_btree_decrement(struct xfs_btree_cur *, int, int *);
int xfs_btree_lookup(struct xfs_btree_cur *, xfs_lookup_t, int *);
int xfs_btree_update(struct xfs_btree_cur *, union xfs_btree_rec *);
int xfs_btree_new_iroot(struct xfs_btree_cur *, int *, int *);
int xfs_btree_insert(struct xfs_btree_cur *, int *);
int xfs_btree_delete(struct xfs_btree_cur *, int *);
int xfs_btree_get_rec(struct xfs_btree_cur *, union xfs_btree_rec **, int *);
int xfs_btree_change_owner(struct xfs_btree_cur *cur, uint64_t new_owner,
			   struct list_head *buffer_list);
/*
 * Return codes for the query range iterator function are 0 to continue
 * iterating, and non-zero to stop iterating.  Any non-zero value will be
 * passed up to the _query_range caller.  The special value -ECANCELED can be
 * used to stop iteration, because _query_range never generates that error
 * code on its own.
 */
typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
		const union xfs_btree_rec *rec, void *priv);
/* Does this cursor point to the last block in the given level? */ staticinlinebool
xfs_btree_islastblock( struct xfs_btree_cur *cur, int level)
{ struct xfs_btree_block *block; struct xfs_buf *bp;
/*
 * NOTE(review): this region is corrupted by extraction.  The body of
 * xfs_btree_islastblock() is cut off right after its local declarations,
 * and the lines below -- a kmem_cache_zalloc() of a cursor, assignments of
 * ops/tp/mp/maxlevels/cache, and "return cur" -- are the tail of a
 * *different* function (presumably a cursor allocator) whose head is
 * missing; they reference parameters (cache, ops, tp, mp, maxlevels) that
 * are not in scope here.  Do not patch in place: recover both functions
 * from the original source.
 */
/* BMBT allocations can come through from non-transactional context. */
cur = kmem_cache_zalloc(cache,
GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
cur->bc_ops = ops;
cur->bc_tp = tp;
cur->bc_mp = mp;
cur->bc_maxlevels = maxlevels;
cur->bc_cache = cache;
return cur;
}
/* Set up / tear down the caches that back btree cursors (see bc_cache). */
int __init xfs_btree_init_cur_caches(void);
void xfs_btree_destroy_cur_caches(void);

/* NOTE(review): presumably repositions @cur at its leftmost edge; confirm. */
int xfs_btree_goto_left_edge(struct xfs_btree_cur *cur);
/* Does this level of the cursor point to the inode root (and not a block)? */ staticinlinebool
xfs_btree_at_iroot( conststruct xfs_btree_cur *cur, int level)
{ return cur->bc_ops->type == XFS_BTREE_TYPE_INODE &&
level == cur->bc_nlevels - 1;
}
/* Allocate/free a metadata-file btree block. */
int xfs_btree_alloc_metafile_block(struct xfs_btree_cur *cur,
		const union xfs_btree_ptr *start, union xfs_btree_ptr *newp,
		int *stat);
int xfs_btree_free_metafile_block(struct xfs_btree_cur *cur,
		struct xfs_buf *bp);

#endif	/* __XFS_BTREE_H__ */
/*
 * NOTE(review): the trailer below is website-measurement boilerplate (German)
 * injected by the tool that extracted this header; it is not part of the
 * original source.  Preserved here, commented out so the header still
 * compiles; remove once the file is restored from the original source.
 *
 * Messung V0.5
 * - Dauer der Verarbeitung: 0.12 Sekunden (vorverarbeitet)
 * - Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *   sorgfältig zusammengestellt.  Es wird jedoch weder Vollständigkeit,
 *   noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 *   zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */