/* * NOTE: information filled in here is typically reflected in the * output of the system command 'df'
*/ staticint orangefs_statfs(struct dentry *dentry, struct kstatfs *buf)
{ int ret = -ENOMEM; struct orangefs_kernel_op_s *new_op = NULL; int flags = 0; struct super_block *sb = NULL;
sb = dentry->d_sb;
gossip_debug(GOSSIP_SUPER_DEBUG, "%s: called on sb %p (fs_id is %d)\n",
__func__,
sb,
(int)(ORANGEFS_SB(sb)->fs_id));
new_op = op_alloc(ORANGEFS_VFS_OP_STATFS); if (!new_op) return ret;
new_op->upcall.req.statfs.fs_id = ORANGEFS_SB(sb)->fs_id;
if (ORANGEFS_SB(sb)->flags & ORANGEFS_OPT_INTR)
flags = ORANGEFS_OP_INTERRUPTIBLE;
ret = service_operation(new_op, "orangefs_statfs", flags);
if (new_op->downcall.status < 0) goto out_op_release;
gossip_debug(GOSSIP_SUPER_DEBUG, "%s: got %ld blocks available | " "%ld blocks total | %ld block size | " "%ld files total | %ld files avail\n",
__func__,
(long)new_op->downcall.resp.statfs.blocks_avail,
(long)new_op->downcall.resp.statfs.blocks_total,
(long)new_op->downcall.resp.statfs.block_size,
(long)new_op->downcall.resp.statfs.files_total,
(long)new_op->downcall.resp.statfs.files_avail);
/* * Remount as initiated by VFS layer. We just need to reparse the mount * options, no need to signal pvfs2-client-core about it.
*/ staticint orangefs_reconfigure(struct fs_context *fc)
{ struct super_block *sb = fc->root->d_sb; struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(sb); struct orangefs_sb_info_s *revised = fc->s_fs_info; unsignedint flags;
/* * Remount as initiated by pvfs2-client-core on restart. This is used to * repopulate mount information left from previous pvfs2-client-core. * * the idea here is that given a valid superblock, we're * re-initializing the user space client with the initial mount * information specified when the super block was first initialized. * this is very different than the first initialization/creation of a * superblock. we use the special service_priority_operation to make * sure that the mount gets ahead of any other pending operation that * is waiting for servicing. this means that the pvfs2-client won't * fail to start several times for all other pending operations before * the client regains all of the mount information from us. * NOTE: this function assumes that the request_mutex is already acquired!
*/ int orangefs_remount(struct orangefs_sb_info_s *orangefs_sb)
{ struct orangefs_kernel_op_s *new_op; int ret = -EINVAL;
new_op = op_alloc(ORANGEFS_VFS_OP_FS_MOUNT); if (!new_op) return -ENOMEM;
strscpy(new_op->upcall.req.fs_mount.orangefs_config_server,
orangefs_sb->devname);
gossip_debug(GOSSIP_SUPER_DEBUG, "Attempting ORANGEFS Remount via host %s\n",
new_op->upcall.req.fs_mount.orangefs_config_server);
/* * we assume that the calling function has already acquired the * request_mutex to prevent other operations from bypassing * this one
*/
ret = service_operation(new_op, "orangefs_remount",
ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX);
gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_remount: mount got return value of %d\n",
ret); if (ret == 0) { /* * store the id assigned to this sb -- it's just a * short-lived mapping that the system interface uses * to map this superblock to a particular mount entry
*/
orangefs_sb->id = new_op->downcall.resp.fs_mount.id;
orangefs_sb->mount_pending = 0;
}
op_release(new_op);
if (orangefs_userspace_version >= 20906) {
new_op = op_alloc(ORANGEFS_VFS_OP_FEATURES); if (!new_op) return -ENOMEM;
new_op->upcall.req.features.features = 0;
ret = service_operation(new_op, "orangefs_features",
ORANGEFS_OP_PRIORITY | ORANGEFS_OP_NO_MUTEX); if (!ret)
orangefs_features =
new_op->downcall.resp.features.features; else
orangefs_features = 0;
op_release(new_op);
} else {
orangefs_features = 0;
}
/*
 * Tell the userspace client (pvfs2-client-core) to drop the dynamic
 * mount entry it holds for this superblock, identified by @id/@fs_id
 * and the config server string @devname.
 *
 * Returns 0 on success or a negative errno from the upcall.
 */
static int orangefs_unmount(int id, __s32 fs_id, const char *devname)
{
	struct orangefs_kernel_op_s *umount_op;
	int ret;

	umount_op = op_alloc(ORANGEFS_VFS_OP_FS_UMOUNT);
	if (!umount_op)
		return -ENOMEM;

	/* Fill in the umount request handed to the userspace daemon. */
	umount_op->upcall.req.fs_umount.id = id;
	umount_op->upcall.req.fs_umount.fs_id = fs_id;
	strscpy(umount_op->upcall.req.fs_umount.orangefs_config_server,
		devname);

	ret = service_operation(umount_op, "orangefs_fs_umount", 0);
	/* Not much to do about an error here. */
	if (ret)
		gossip_err("orangefs_unmount: service_operation %d\n", ret);
	op_release(umount_op);
	return ret;
}
/* Hang the xattr handlers off the superblock */
sb->s_xattr = orangefs_xattr_handlers;
sb->s_magic = ORANGEFS_SUPER_MAGIC;
sb->s_op = &orangefs_s_ops;
set_default_d_op(sb, &orangefs_dentry_operations);
gossip_debug(GOSSIP_SUPER_DEBUG, "Attempting ORANGEFS Mount via host %s\n",
new_op->upcall.req.fs_mount.orangefs_config_server);
ret = service_operation(new_op, "orangefs_mount", 0);
gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_mount: mount got return value of %d\n", ret); if (ret) goto free_op;
if (new_op->downcall.resp.fs_mount.fs_id == ORANGEFS_FS_ID_NULL) {
gossip_err("ERROR: Retrieved null fs_id\n");
ret = -EINVAL; goto free_op;
}
sb = sget_fc(fc, NULL, set_anon_super_fc);
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
orangefs_unmount(new_op->downcall.resp.fs_mount.id,
new_op->downcall.resp.fs_mount.fs_id,
fc->source); goto free_op;
}
/* init our private orangefs sb info */
ret = orangefs_fill_sb(sb, fc, &new_op->downcall.resp.fs_mount);
if (ret) goto free_sb_and_op;
/* * on successful mount, store the devname and data * used
*/
strscpy(ORANGEFS_SB(sb)->devname, fc->source);
/* mount_pending must be cleared */
ORANGEFS_SB(sb)->mount_pending = 0;
/* * finally, add this sb to our list of known orangefs * sb's
*/
gossip_debug(GOSSIP_SUPER_DEBUG, "Adding SB %p to orangefs superblocks\n",
ORANGEFS_SB(sb));
spin_lock(&orangefs_superblocks_lock);
list_add_tail(&ORANGEFS_SB(sb)->list, &orangefs_superblocks);
spin_unlock(&orangefs_superblocks_lock);
op_release(new_op);
/* Must be removed from the list now. */
ORANGEFS_SB(sb)->no_list = 0;
free_sb_and_op: /* Will call orangefs_kill_sb with sb not in list. */
ORANGEFS_SB(sb)->no_list = 1; /* ORANGEFS_VFS_OP_FS_UMOUNT is done by orangefs_kill_sb. */
deactivate_locked_super(sb);
free_op:
gossip_err("orangefs_mount: mount request failed with %d\n", ret); if (ret == -EINVAL) {
gossip_err("Ensure that all orangefs-servers have the same FS configuration files\n");
gossip_err("Look at pvfs2-client-core log file (typically /tmp/pvfs2-client.log) for more details\n");
}
/* * Set up the filesystem mount context.
*/ int orangefs_init_fs_context(struct fs_context *fc)
{ struct orangefs_sb_info_s *osi;
osi = kzalloc(sizeof(struct orangefs_sb_info_s), GFP_KERNEL); if (!osi) return -ENOMEM;
/* * Force any potential flags that might be set from the mount * to zero, ie, initialize to unset.
*/
fc->sb_flags_mask &= ~SB_POSIXACL;
osi->flags &= ~ORANGEFS_OPT_INTR;
osi->flags &= ~ORANGEFS_OPT_LOCAL_LOCK;
/*
 * Tear down an ORANGEFS superblock: release the generic anon super,
 * notify userspace that the mount is gone, unhook our private sb info
 * from the global superblock list, and free it.
 */
void orangefs_kill_sb(struct super_block *sb)
{
	int r;

	gossip_debug(GOSSIP_SUPER_DEBUG, "orangefs_kill_sb: called\n");

	/* provided sb cleanup */
	kill_anon_super(sb);

	if (!ORANGEFS_SB(sb)) {
		/*
		 * No private info was ever attached (mount never finished).
		 * Lock/unlock of the request mutex acts purely as a barrier:
		 * wait out any in-flight request processing, then bail.
		 */
		mutex_lock(&orangefs_request_mutex);
		mutex_unlock(&orangefs_request_mutex);
		return;
	}
	/*
	 * issue the unmount to userspace to tell it to remove the
	 * dynamic mount info it has for this superblock
	 */
	r = orangefs_unmount(ORANGEFS_SB(sb)->id, ORANGEFS_SB(sb)->fs_id,
	    ORANGEFS_SB(sb)->devname);
	/*
	 * On a successful umount upcall, flag the (dying) mount as pending
	 * so nothing treats this sb as actively mounted anymore.
	 * NOTE(review): set only when r == 0 — presumably on failure the
	 * daemon still thinks the mount exists; confirm against upstream.
	 */
	if (!r)
		ORANGEFS_SB(sb)->mount_pending = 1;

	if (!ORANGEFS_SB(sb)->no_list) {
		/* remove the sb from our list of orangefs specific sb's */
		spin_lock(&orangefs_superblocks_lock);
		/*
		 * not list_del_init: leaving list.prev == NULL (while next
		 * stays valid) marks the entry as removed for any concurrent
		 * walker of the superblock list.
		 */
		__list_del_entry(&ORANGEFS_SB(sb)->list);
		ORANGEFS_SB(sb)->list.prev = NULL;
		spin_unlock(&orangefs_superblocks_lock);
	}

	/*
	 * make sure that ORANGEFS_DEV_REMOUNT_ALL loop that might've seen us
	 * gets completed before we free the dang thing.
	 */
	mutex_lock(&orangefs_request_mutex);
	mutex_unlock(&orangefs_request_mutex);

	/* free the orangefs superblock private data */
	kfree(ORANGEFS_SB(sb));
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.