/*
 * Signal that a direct IO write is in progress, to avoid deadlock for sync
 * direct IO writes when fsync is called during the direct IO write path.
 */
#define BTRFS_TRANS_DIO_WRITE_STUB	((void *) 1)

/* Radix-tree tag for roots that are part of the transaction. */
#define BTRFS_ROOT_TRANS_TAG 0
/*
 * One in-memory transaction: tracks all writers, dirty state and resources
 * that must be flushed or released when the transaction commits or aborts.
 */
struct btrfs_transaction {
	u64 transid;
	/*
	 * Total external writers(USERSPACE/START/ATTACH) in this
	 * transaction, it must be zero before the transaction is
	 * being committed.
	 */
	atomic_t num_extwriters;
	/*
	 * Total writers in this transaction, it must be zero before the
	 * transaction can end.
	 */
	atomic_t num_writers;
	refcount_t use_count;
	/* Fixed: was the fused token "unsignedlong", which does not compile. */
	unsigned long flags;
	/* Be protected by fs_info->trans_lock when we want to change it. */
	enum btrfs_trans_state state;
	int aborted;
	struct list_head list;
	struct extent_io_tree dirty_pages;
	time64_t start_time;
	wait_queue_head_t writer_wait;
	wait_queue_head_t commit_wait;
	struct list_head pending_snapshots;
	struct list_head dev_update_list;
	struct list_head switch_commits;
	struct list_head dirty_bgs;
	/*
	 * There is no explicit lock which protects io_bgs, rather its
	 * consistency is implied by the fact that all the sites which modify
	 * it do so under some form of transaction critical section, namely:
	 *
	 * - btrfs_start_dirty_block_groups - This function can only ever be
	 *   run by one of the transaction committers. Refer to
	 *   BTRFS_TRANS_DIRTY_BG_RUN usage in btrfs_commit_transaction
	 *
	 * - btrfs_write_dirty_blockgroups - this is called by
	 *   commit_cowonly_roots from transaction critical section
	 *   (TRANS_STATE_COMMIT_DOING)
	 *
	 * - btrfs_cleanup_dirty_bgs - called on transaction abort
	 */
	struct list_head io_bgs;
	struct list_head dropped_roots;
	struct extent_io_tree pinned_extents;
	/*
	 * We need to make sure block group deletion doesn't race with
	 * free space cache writeout. This mutex keeps them from stomping
	 * on each other.
	 */
	struct mutex cache_write_mutex;
	spinlock_t dirty_bgs_lock;
	/* Protected by spin lock fs_info->unused_bgs_lock. */
	struct list_head deleted_bgs;
	spinlock_t dropped_roots_lock;
	struct btrfs_delayed_ref_root delayed_refs;
	struct btrfs_fs_info *fs_info;
	/*
	 * Number of ordered extents the transaction must wait for before
	 * committing. These are ordered extents started by a fast fsync.
	 */
	atomic_t pending_ordered;
	wait_queue_head_t pending_wait;
};
/*
 * Per-task handle on a running transaction: reservation accounting plus
 * flags describing what this task is currently doing within it.
 */
struct btrfs_trans_handle {
	u64 transid;
	u64 bytes_reserved;
	u64 delayed_refs_bytes_reserved;
	u64 chunk_bytes_reserved;
	/* Fixed: both were the fused token "unsignedlong" (compile error). */
	unsigned long delayed_ref_updates;
	unsigned long delayed_ref_csum_deletions;
	struct btrfs_transaction *transaction;
	struct btrfs_block_rsv *block_rsv;
	struct btrfs_block_rsv *orig_rsv;
	/* Set by a task that wants to create a snapshot. */
	struct btrfs_pending_snapshot *pending_snapshot;
	refcount_t use_count;
	/* Fixed: was the fused token "unsignedint" (compile error). */
	unsigned int type;
	/*
	 * Error code of transaction abort, set outside of locks and must use
	 * the READ_ONCE/WRITE_ONCE access.
	 */
	short aborted;
	bool adding_csums;
	bool allocating_chunk;
	bool removing_chunk;
	bool reloc_reserved;
	bool in_fsync;
	struct btrfs_fs_info *fs_info;
	struct list_head new_bgs;
	struct btrfs_block_rsv delayed_rsv;
};
/*
 * The abort status can be changed between calls and is not protected by locks.
 * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's
 * set to a non-zero value it does not change, so the macro should be in checks
 * but is not necessary for further reads of the value.
 */
#define TRANS_ABORTED(trans) (unlikely(READ_ONCE((trans)->aborted)))
/*
 * State carried from snapshot ioctl to the transaction commit that actually
 * creates the snapshot. Field order preserved (layout-sensitive).
 */
struct btrfs_pending_snapshot {
	struct dentry *dentry;
	struct btrfs_inode *dir;
	struct btrfs_root *root;
	struct btrfs_root_item *root_item;
	struct btrfs_root *snap;
	struct btrfs_qgroup_inherit *inherit;
	struct btrfs_path *path;
	/* block reservation for the operation */
	struct btrfs_block_rsv block_rsv;
	/* extra metadata reservation for relocation */
	int error;
	/* Preallocated anonymous block device number */
	dev_t anon_dev;
	bool readonly;
	struct list_head list;
};
/*
 * Make qgroup codes to skip given qgroupid, means the old/new_roots for
 * qgroup won't contain the qgroupid in it.
 */
/*
 * NOTE(review): the body of this function appears truncated in this copy of
 * the file — only the local declaration below survives and there is no
 * closing brace. Recover the full implementation from the upstream source
 * before building; do not guess the missing statements.
 */
static inline void btrfs_set_skip_qgroup(struct btrfs_trans_handle *trans,
					 u64 qgroupid)
{
	struct btrfs_delayed_ref_root *delayed_refs;
/*
 * We want the transaction abort to print stack trace only for errors where the
 * cause could be a bug, eg. due to ENOSPC, and not for common errors that are
 * caused by external factors.
 *
 * Returns false for -EIO/-EROFS/-ENOMEM (environmental failures, no stack
 * trace wanted), true for everything else.
 *
 * Fixed fused tokens from the mangled copy: "staticinlinebool",
 * "returnfalse", "returntrue" — none of which compile.
 */
static inline bool btrfs_abort_should_print_stack(int error)
{
	switch (error) {
	case -EIO:
	case -EROFS:
	case -ENOMEM:
		return false;
	}
	return true;
}
/*
 * Call btrfs_abort_transaction as early as possible when an error condition is
 * detected, that way the exact stack trace is reported for some errors.
 */
#define btrfs_abort_transaction(trans, error)		\
do {							\
	bool __first = false;				\
	/* Report first abort since mount */		\
	if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED,	\
			&((trans)->fs_info->fs_state))) {	\
		__first = true;				\
		if (WARN(btrfs_abort_should_print_stack(error),	\
			KERN_ERR				\
			"BTRFS: Transaction aborted (error %d)\n", \
			(error))) {			\
			/* Stack trace printed. */	\
		} else {				\
			btrfs_err((trans)->fs_info,	\
				"Transaction aborted (error %d)", \
				(error));		\
		}					\
	}						\
	__btrfs_abort_transaction((trans), __func__,	\
				__LINE__, (error), __first);	\
} while (0)
/*
 * NOTE(review): the text below is website-extraction residue (a German
 * disclaimer footer), not part of the original source file. It is wrapped
 * in a comment so the file remains compilable; it should be removed.
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfaeltig zusammengestellt. Es wird jedoch weder Vollstaendigkeit, noch
 * Richtigkeit, noch Qualitaet der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell."
 */