/*
 * One MDS capability match rule: restricts a cap to a uid/gid set and
 * to a path subtree within a named filesystem.
 */
struct ceph_mds_cap_match {
	s64 uid;		/* default to MDS_AUTH_UID_ANY */
	u32 num_gids;
	u32 *gids;		/* use these GIDs */
	char *path;		/* require path to be child of this
				   (may be "" or "/" for any) */
	char *fs_name;
	bool root_squash;	/* default to false */
};
/*
 * Parsed info about an mds reply: the decoded reply head plus the raw
 * encoded snapshot-context blob carried by certain operations.
 */
struct ceph_mds_reply_info_parsed {
	struct ceph_mds_reply_head *head;

	/* encoded blob describing snapshot contexts for certain
	   operations (e.g., open) */
	void *snapblob;
	int snapblob_len;
};
/*
 * cap releases are batched and sent to the MDS en masse.
 *
 * Account for per-message overhead of mds_cap_release header
 * and __le32 for osd epoch barrier trailing field.
 */
#define CEPH_CAPS_PER_RELEASE	((PAGE_SIZE - sizeof(u32) -		\
				  sizeof(struct ceph_mds_cap_release)) /	\
				 sizeof(struct ceph_mds_cap_item))
/*
 * Modes of choosing which MDS to send a request to.
 */
enum {
	USE_ANY_MDS,
	USE_RANDOM_MDS,
	USE_AUTH_MDS,	/* prefer authoritative mds for this metadata item */
};
u8 *r_altname;		/* fscrypt binary crypttext for long filenames */
u32 r_altname_len;	/* length of r_altname */

int r_fmode;		/* file mode, if expecting cap */
int r_request_release_offset;
/* FIX: was fused token "conststruct" — would not compile */
const struct cred *r_cred;
struct mnt_idmap *r_mnt_idmap;
struct timespec64 r_stamp;

/* for choosing which mds to send this request to */
int r_direct_mode;
u32 r_direct_hash;	/* choose dir frag based on this dentry hash */

/* data payload is used for xattr ops */
struct ceph_pagelist *r_pagelist;

/* what caps shall we drop? */
int r_inode_drop, r_inode_unless;
int r_dentry_drop, r_dentry_unless;
int r_old_dentry_drop, r_old_dentry_unless;
struct inode *r_old_inode;
int r_old_inode_drop, r_old_inode_unless;

struct ceph_msg *r_request;	/* original request */
struct ceph_msg *r_reply;
struct ceph_mds_reply_info_parsed r_reply_info;
int r_err;
u32 r_readdir_offset;

struct page *r_locked_page;
int r_dir_caps;
int r_num_caps;
/* FIX: several fields below used fused token "unsignedlong" */
unsigned long r_timeout;	/* optional. jiffies, 0 is "wait forever" */
unsigned long r_started;	/* start time to measure timeout against */
unsigned long r_start_latency;	/* start time to measure latency */
unsigned long r_end_latency;	/* finish time to measure latency */
unsigned long r_request_started; /* start time for mds request only,
				    used to measure lease durations */

/* link unsafe requests to parent directory, for fsync */
struct inode *r_unsafe_dir;
struct list_head r_unsafe_dir_item;

/* unsafe requests that modify the target inode */
struct list_head r_unsafe_target_item;

struct ceph_mds_session *r_session;

int r_attempts;		/* resend attempts */
int r_num_fwd;		/* number of forward attempts */
int r_resend_mds;	/* mds to resend to next, if any */
u32 r_sent_on_mseq;	/* cap mseq request was sent at */
u64 r_deleg_ino;
/*
 * node for list of quotarealm inodes that are not visible from the
 * filesystem mountpoint, but required to handle, e.g. quotas.
 */
struct ceph_quotarealm_inode {
	struct rb_node node;
	u64 ino;
	/* FIX: was fused token "unsignedlong" — would not compile */
	unsigned long timeout;	/* last time a lookup failed for this inode */
	struct mutex mutex;
	struct inode *inode;
};
#ifdef CONFIG_DEBUG_FS
/* Debugfs bookkeeping: one entry per task currently waiting on caps. */
struct cap_wait {
	struct list_head	list;
	u64			ino;
	pid_t			tgid;
	int			need;
	int			want;
};
struct ceph_mds_session **sessions; /* NULL for mds if no session */
atomic_t num_sessions; int max_sessions; /* len of sessions array */
spinlock_t stopping_lock; /* protect snap_empty */ int stopping; /* the stage of shutting down */
atomic_t stopping_blockers; struct completion stopping_waiter;
atomic64_t quotarealms_count; /* # realms with quota */ /* * We keep a list of inodes we don't see in the mountpoint but that we * need to track quota realms.
*/ struct rb_root quotarealms_inodes; struct mutex quotarealms_inodes_mutex;
/* * snap_rwsem will cover cap linkage into snaprealms, and * realm snap contexts. (later, we can do per-realm snap * contexts locks..) the empty list contains realms with no * references (implying they contain no inodes with caps) that * should be destroyed.
*/
u64 last_snap_seq; struct rw_semaphore snap_rwsem; struct rb_root snap_realms; struct list_head snap_empty; int num_snap_realms;
spinlock_t snap_empty_lock; /* protect snap_empty */
u64 last_tid; /* most recent mds request */
u64 oldest_tid; /* oldest incomplete mds request,
excluding setfilelock requests */ struct rb_root request_tree; /* pending mds requests */ struct delayed_work delayed_work; /* delayed work */ unsignedlong last_renew_caps; /* last time we renewed our caps */ struct list_head cap_delay_list; /* caps with delayed release */ struct list_head cap_unlink_delay_list; /* caps with delayed release for unlink */
spinlock_t cap_delay_lock; /* protects cap_delay_list and cap_unlink_delay_list */ struct list_head snap_flush_list; /* cap_snaps ready to flush */
spinlock_t snap_flush_lock;
u64 last_cap_flush_tid; struct list_head cap_flush_list; struct list_head cap_dirty_migrating; /* ...that are migration... */ int num_cap_flushing; /* # caps we are flushing */
spinlock_t cap_dirty_lock; /* protects above items */
wait_queue_head_t cap_flushing_wq;
/* * Cap reservations * * Maintain a global pool of preallocated struct ceph_caps, referenced * by struct ceph_caps_reservations. This ensures that we preallocate * memory needed to successfully process an MDS response. (If an MDS * sends us cap information and we fail to process it, we will have * problems due to the client and MDS being out of sync.) * * Reservations are 'owned' by a ceph_cap_reservation context.
*/
spinlock_t caps_list_lock; struct list_head caps_list; /* unused (reserved or
unreserved) */ #ifdef CONFIG_DEBUG_FS struct list_head cap_wait_list; #endif int caps_total_count; /* total caps allocated */ int caps_use_count; /* in use */ int caps_use_max; /* max used caps */ int caps_reserve_count; /* unused, reserved */ int caps_avail_count; /* unused, unreserved */ int caps_min_count; /* keep at least this many
(unreserved) */
spinlock_t dentry_list_lock; struct list_head dentry_leases; /* fifo list */ struct list_head dentry_dir_leases; /* lru list */
/*
 * NOTE(review): the following disclaimer text is not part of this header;
 * it appears to have been accidentally pasted from a web page and should
 * be removed.  English translation of the original German:
 *
 * "The information on this website has been carefully compiled to the
 * best of our knowledge.  However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */