/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*/
/**
 * Clone bitmaps (bi_clone):
 *
 * - When a block is freed, we remember the previous state of the block in the
 *   clone bitmap, and only mark the block as free in the real bitmap.
 *
 * - When looking for a block to allocate, we check for a free block in the
 *   clone bitmap, and if no clone bitmap exists, in the real bitmap.
 *
 * - For allocating a block, we mark it as allocated in the real bitmap, and if
 *   a clone bitmap exists, also in the clone bitmap.
 *
 * - At the end of a log_flush, we copy the real bitmap into the clone bitmap
 *   to make the clone bitmap reflect the current allocation state.
 *   (Alternatively, we could remove the clone bitmap.)
 *
 * The clone bitmaps are in-core only, and is never written to disk.
 *
 * These steps ensure that blocks which have been freed in a transaction cannot
 * be reallocated in that same transaction.
 */
struct gfs2_bitmap {
	struct buffer_head *bi_bh;	/* on-disk bitmap buffer */
	char *bi_clone;			/* in-core clone bitmap (see above), or NULL */
	unsigned long bi_flags;		/* was fused as "unsignedlong" — fixed */
	u32 bi_offset;
	u32 bi_start;
	u32 bi_bytes;
	u32 bi_blocks;
};
struct gfs2_rgrpd { struct rb_node rd_node; /* Link with superblock */ struct gfs2_glock *rd_gl; /* Glock for this rgrp */
u64 rd_addr; /* grp block disk address */
u64 rd_data0; /* first data location */
u32 rd_length; /* length of rgrp header in fs blocks */
u32 rd_data; /* num of data blocks in rgrp */
u32 rd_bitbytes; /* number of bytes in data bitmaps */
u32 rd_free;
u32 rd_requested; /* number of blocks in rd_rstree */
u32 rd_reserved; /* number of reserved blocks */
u32 rd_free_clone;
u32 rd_dinodes;
u64 rd_igeneration; struct gfs2_bitmap *rd_bits; struct gfs2_sbd *rd_sbd; struct gfs2_rgrp_lvb *rd_rgl;
u32 rd_last_alloc;
u32 rd_flags;
u32 rd_extfail_pt; /* extent failure point */ #define GFS2_RDF_CHECK 0x10000000 /* check for unlinked inodes */ #define GFS2_RDF_ERROR 0x40000000 /* error in rg */ #define GFS2_RDF_PREFERRED 0x80000000 /* This rgrp is preferred */ #define GFS2_RDF_MASK 0xf0000000 /* mask for internal flags */
spinlock_t rd_rsspin; /* protects reservation related vars */ struct mutex rd_mutex; struct rb_root rd_rstree; /* multi-block reservation tree */
};
/*
 * ls_recover_flags:
 *
 * DFL_BLOCK_LOCKS: dlm is in recovery and will grant locks that had been
 * held by failed nodes whose journals need recovery.  Those locks should
 * only be used for journal recovery until the journal recovery is done.
 * This is set by the dlm recover_prep callback and cleared by the
 * gfs2_control thread when journal recovery is complete.  To avoid
 * races between recover_prep setting and gfs2_control clearing, recover_spin
 * is held while changing this bit and reading/writing recover_block
 * and recover_start.
 *
 * DFL_NO_DLM_OPS: dlm lockspace ops/callbacks are not being used.
 *
 * DFL_FIRST_MOUNT: this node is the first to mount this fs and is doing
 * recovery of all journals before allowing other nodes to mount the fs.
 * This is cleared when FIRST_MOUNT_DONE is set.
 *
 * DFL_FIRST_MOUNT_DONE: this node was the first mounter, and has finished
 * recovery of all journals, and now allows other nodes to mount the fs.
 *
 * DFL_MOUNT_DONE: gdlm_mount has completed successfully and cleared
 * BLOCK_LOCKS for the first time.  The gfs2_control thread should now
 * control clearing BLOCK_LOCKS for further recoveries.
 *
 * DFL_UNMOUNT: gdlm_unmount sets to keep sdp off gfs2_control_wq.
 *
 * DFL_DLM_RECOVERY: set while dlm is in recovery, between recover_prep()
 * and recover_done(), i.e. set while recover_block == recover_start.
 */
/*
 * We are using struct lm_lockname as an rhashtable key.  Avoid holes within
 * the struct; padding at the end is fine.
 */
struct lm_lockname {
	u64 ln_number;			/* lock number (e.g. disk block address) */
	struct gfs2_sbd *ln_sbd;	/* owning superblock */
	unsigned int ln_type;		/* was fused as "unsignedint" — fixed */
};
/*
 * Resource group multi-block reservation, in order of appearance:
 *
 * Step 1. Function prepares to write, allocates a mb, sets the size hint.
 * Step 2. User calls inplace_reserve to target an rgrp, sets the rgrp info
 * Step 3. Function get_local_rgrp locks the rgrp, determines which bits to use
 * Step 4. Bits are assigned from the rgrp based on either the reservation
 *         or wherever it can.
 */
/* One multi-block reservation within a resource group (see steps above). */
struct gfs2_blkreserv {
	struct rb_node rs_node;		/* node within rd_rstree */
	struct gfs2_rgrpd *rs_rgd;
	u64 rs_start;
	u32 rs_requested;
	u32 rs_reserved;		/* number of reserved blocks */
};
/*
 * Allocation parameters
 * @target: The number of blocks we'd ideally like to allocate
 * @aflags: The flags (e.g. Orlov flag)
 *
 * The intent is to gradually expand this structure over time in
 * order to give more information, e.g. alignment, min extent size
 * to the allocation code.
 */
struct gfs2_alloc_parms {
	u64 target;
	u32 min_target;
	u32 aflags;
	u64 allowed;
};
/* State fields protected by gl_lockref.lock */
unsigned int gl_state:2,	/* Current state; was fused as "unsignedint" */
	     gl_target:2,	/* Target state */
	     gl_demote_state:2,	/* State requested by remote node */
	     gl_req:2,		/* State in last dlm request */
	     gl_reply:8;	/* Last reply from the dlm */

unsigned long gl_demote_time;	/* time of first demote request; was fused as "unsignedlong" */
long gl_hold_time;
struct list_head gl_holders;
/* * Since i_inode is the first element of struct gfs2_inode, * this is effectively a cast.
*/ staticinlinestruct gfs2_inode *GFS2_I(struct inode *inode)
{ return container_of(inode, struct gfs2_inode, i_inode);
}
/*
 * lm_mount() return values
 *
 * ls_jid - the journal ID this node should use
 * ls_first - this node is the first to mount the file system
 * ls_lockspace - lock module's context for this file system
 * ls_ops - lock module's functions
 */
spinlock_t ls_recover_spin; /* protects following fields */
unsigned long ls_recover_flags; /* DFL_; was fused as "unsignedlong" */
uint32_t ls_recover_mount; /* gen in first recover_done cb */
uint32_t ls_recover_start; /* gen in last recover_done cb */
uint32_t ls_recover_block; /* copy recover_start in last recover_prep */
uint32_t ls_recover_size; /* size of recover_submit, recover_result */
uint32_t *ls_recover_submit; /* gen in last recover_slot cb per jid */
uint32_t *ls_recover_result; /* result of last jid recovery */
};
/* Per-cpu glock statistics container. */
struct gfs2_pcpu_lkstats {
	/* One struct for each glock type */
	struct gfs2_lkstats lkstats[10];
};
/* List of local (per node) statfs inodes */
struct local_statfs_inode {
	struct list_head si_list;
	struct inode *si_sc_inode;
	unsigned int si_jid;	/* journal id this statfs inode corresponds to;
				   was fused as "unsignedint" — fixed */
};
u32 sd_fsb2bb; /* presumably fs-block to basic-block ratio — TODO confirm against sb setup */
u32 sd_fsb2bb_shift; /* presumably log2 of sd_fsb2bb — TODO confirm */
u32 sd_diptrs; /* Number of pointers in a dinode */
u32 sd_inptrs; /* Number of pointers in a indirect block */
u32 sd_ldptrs; /* Number of pointers in a log descriptor block */
u32 sd_jbsize; /* Size of a journaled data block */
u32 sd_hash_bsize; /* sizeof(exhash block) */
u32 sd_hash_bsize_shift; /* presumably log2 of sd_hash_bsize — TODO confirm */
u32 sd_hash_ptrs; /* Number of pointers in a hash block */
u32 sd_qc_per_block; /* NOTE(review): looks like quota-change entries per block — confirm */
u32 sd_blocks_per_bitmap; /* NOTE(review): blocks covered by one bitmap block — confirm */
u32 sd_max_dirres; /* Max blocks needed to add a directory entry */
u32 sd_max_height; /* Max height of a file's metadata tree */
u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1]; /* NOTE(review): presumably max file size per tree height — confirm */
u32 sd_max_dents_per_leaf; /* Max number of dirents in a leaf block */
/*
 * NOTE(review): the following text is non-code residue from a web page and is
 * not part of this source file; it should be removed.  English translation of
 * the German original: "The information on this web page was compiled
 * carefully to the best of our knowledge.  However, neither completeness nor
 * correctness nor quality of the provided information is guaranteed.
 * Remark: the colored syntax display and the measurement are still
 * experimental."
 */