down_write(&nilfs->ns_sem); if (!(nilfs->ns_mount_state & NILFS_ERROR_FS)) {
nilfs->ns_mount_state |= NILFS_ERROR_FS;
sbp = nilfs_prepare_super(sb, 0); if (likely(sbp)) {
sbp[0]->s_state |= cpu_to_le16(NILFS_ERROR_FS); if (sbp[1])
sbp[1]->s_state |= cpu_to_le16(NILFS_ERROR_FS);
nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
}
}
up_write(&nilfs->ns_sem);
}
/** * __nilfs_error() - report failure condition on a filesystem * @sb: super block instance * @function: name of calling function * @fmt: format string for message to be output * @...: optional arguments to @fmt * * __nilfs_error() sets an ERROR_FS flag on the superblock as well as * reporting an error message. This function should be called when * NILFS detects incoherences or defects of meta data on disk. * * This implements the body of nilfs_error() macro. Normally, * nilfs_error() should be used. As for sustainable errors such as a * single-shot I/O error, nilfs_err() should be used instead. * * Callers should not add a trailing newline since this will do it.
*/ void __nilfs_error(struct super_block *sb, constchar *function, constchar *fmt, ...)
{ struct the_nilfs *nilfs = sb->s_fs_info; struct va_format vaf;
va_list args;
/* nilfs->ns_sem must be locked by the caller. */ if (sbp[0]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { if (sbp[1] &&
sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) {
memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
} else {
nilfs_crit(sb, "superblock broke"); return NULL;
}
} elseif (sbp[1] &&
sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) {
memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
}
if (flip && sbp[1])
nilfs_swap_super_block(nilfs);
return sbp;
}
/**
 * nilfs_commit_super - timestamp, checksum, and write back superblock(s)
 * @sb: super block instance
 * @flag: commit mode (%NILFS_SB_COMMIT for the primary superblock only,
 *        %NILFS_SB_COMMIT_ALL to also update the secondary one)
 *
 * Return: value of nilfs_sync_super() (0 on success, or a negative
 * error code on failure).
 */
int nilfs_commit_super(struct super_block *sb, int flag)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	time64_t t;

	/* nilfs->ns_sem must be locked by the caller. */
	t = ktime_get_real_seconds();
	nilfs->ns_sbwtime = t;
	sbp[0]->s_wtime = cpu_to_le64(t);
	/* Zero s_sum before checksumming so the field itself is covered. */
	sbp[0]->s_sum = 0;
	sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
					     (unsigned char *)sbp[0],
					     nilfs->ns_sbsize));
	if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) {
		sbp[1]->s_wtime = sbp[0]->s_wtime;
		sbp[1]->s_sum = 0;
		sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
						     (unsigned char *)sbp[1],
						     nilfs->ns_sbsize));
	}
	clear_nilfs_sb_dirty(nilfs);
	nilfs->ns_flushed_device = 1;
	/* make sure store to ns_flushed_device cannot be reordered */
	smp_wmb();
	return nilfs_sync_super(sb, flag);
}
/**
 * nilfs_cleanup_super() - write filesystem state for cleanup
 * @sb: super block instance to be unmounted or degraded to read-only
 *
 * This function restores state flags in the on-disk super block.
 * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the
 * filesystem was not clean previously.
 *
 * Return: 0 on success, %-EIO if I/O error or superblock is corrupted.
 */
int nilfs_cleanup_super(struct super_block *sb)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	int commit_flag = NILFS_SB_COMMIT;

	sbp = nilfs_prepare_super(sb, 0);
	if (!sbp)
		return -EIO;

	sbp[0]->s_state = cpu_to_le16(nilfs->ns_mount_state);
	nilfs_set_log_cursor(sbp[0], nilfs);

	if (sbp[1] && sbp[0]->s_last_cno == sbp[1]->s_last_cno) {
		/*
		 * Propagate the "clean" flag to the opposite super
		 * block as well when both super blocks point to the
		 * same checkpoint.
		 */
		sbp[1]->s_state = sbp[0]->s_state;
		commit_flag = NILFS_SB_COMMIT_ALL;
	}

	return nilfs_commit_super(sb, commit_flag);
}
/**
 * nilfs_move_2nd_super - relocate secondary super block
 * @sb: super block instance
 * @sb2off: new offset of the secondary super block (in bytes)
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *nsbh;
	struct nilfs_super_block *nsbp;
	sector_t blocknr, newblocknr;
	unsigned long offset;
	int sb2i;  /* array index of the secondary superblock */
	int ret = 0;

	/* nilfs->ns_sem must be locked by the caller. */
	if (nilfs->ns_sbh[1] &&
	    nilfs->ns_sbh[1]->b_blocknr > nilfs->ns_first_data_block) {
		sb2i = 1;
		blocknr = nilfs->ns_sbh[1]->b_blocknr;
	} else if (nilfs->ns_sbh[0]->b_blocknr > nilfs->ns_first_data_block) {
		sb2i = 0;
		blocknr = nilfs->ns_sbh[0]->b_blocknr;
	} else {
		/* No live secondary superblock found */
		sb2i = -1;
		blocknr = 0;
	}
	if (sb2i >= 0 && (u64)blocknr << nilfs->ns_blocksize_bits == sb2off)
		goto out;  /* super block location is unchanged */

	/* Get new super block buffer */
	newblocknr = sb2off >> nilfs->ns_blocksize_bits;
	offset = sb2off & (nilfs->ns_blocksize - 1);
	nsbh = sb_getblk(sb, newblocknr);
	if (!nsbh) {
		nilfs_warn(sb,
			   "unable to move secondary superblock to block %llu",
			   (unsigned long long)newblocknr);
		ret = -EIO;
		goto out;
	}
	nsbp = (void *)nsbh->b_data + offset;

	lock_buffer(nsbh);
	if (sb2i >= 0) {
		/*
		 * The position of the second superblock only changes by 4KiB,
		 * which is larger than the maximum superblock data size
		 * (= 1KiB), so there is no need to use memmove() to allow
		 * overlap between source and destination.
		 */
		memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);

		/*
		 * Zero fill after copy to avoid overwriting in case of move
		 * within the same block.
		 */
		memset(nsbh->b_data, 0, offset);
		memset((void *)nsbp + nilfs->ns_sbsize, 0,
		       nsbh->b_size - offset - nilfs->ns_sbsize);
	} else {
		memset(nsbh->b_data, 0, nsbh->b_size);
	}
	set_buffer_uptodate(nsbh);
	unlock_buffer(nsbh);

	if (sb2i >= 0) {
		brelse(nilfs->ns_sbh[sb2i]);
		nilfs->ns_sbh[sb2i] = nsbh;
		nilfs->ns_sbp[sb2i] = nsbp;
	} else if (nilfs->ns_sbh[0]->b_blocknr < nilfs->ns_first_data_block) {
		/* secondary super block will be restored to index 1 */
		nilfs->ns_sbh[1] = nsbh;
		nilfs->ns_sbp[1] = nsbp;
	} else {
		brelse(nsbh);
	}
out:
	return ret;
}
/**
 * nilfs_resize_fs - resize the filesystem
 * @sb: super block instance
 * @newsize: new size of the filesystem (in bytes)
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct nilfs_super_block **sbp;
	__u64 devsize, newnsegs;
	loff_t sb2off;
	int ret;

	ret = -ERANGE;
	devsize = bdev_nr_bytes(sb->s_bdev);
	if (newsize > devsize)
		goto out;

	/*
	 * Prevent underflow in second superblock position calculation.
	 * The exact minimum size check is done in nilfs_sufile_resize().
	 */
	if (newsize < 4096) {
		ret = -ENOSPC;
		goto out;
	}

	/*
	 * Write lock is required to protect some functions depending
	 * on the number of segments, the number of reserved segments,
	 * and so forth.
	 */
	down_write(&nilfs->ns_segctor_sem);

	/*
	 * Derive the new secondary-superblock offset and the new segment
	 * count from the requested size.  Both values were previously
	 * used without ever being computed (uninitialized reads).
	 */
	sb2off = NILFS_SB2_OFFSET_BYTES(newsize);
	newnsegs = sb2off >> nilfs->ns_blocksize_bits;
	do_div(newnsegs, nilfs->ns_blocks_per_segment);

	ret = nilfs_sufile_resize(nilfs->ns_sufile, newnsegs);
	up_write(&nilfs->ns_segctor_sem);
	if (ret < 0)
		goto out;

	/* Flush dirty data into a new log before touching superblocks. */
	ret = nilfs_construct_segment(sb);
	if (ret < 0)
		goto out;

	down_write(&nilfs->ns_sem);
	nilfs_move_2nd_super(sb, sb2off);
	ret = -EIO;
	sbp = nilfs_prepare_super(sb, 0);
	if (likely(sbp)) {
		nilfs_set_log_cursor(sbp[0], nilfs);
		/*
		 * Drop NILFS_RESIZE_FS flag for compatibility with
		 * mount-time resize which may be implemented in a
		 * future release.
		 */
		sbp[0]->s_state = cpu_to_le16(le16_to_cpu(sbp[0]->s_state) &
					      ~NILFS_RESIZE_FS);
		sbp[0]->s_dev_size = cpu_to_le64(newsize);
		sbp[0]->s_nsegments = cpu_to_le64(nilfs->ns_nsegments);
		if (sbp[1])
			memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
		ret = nilfs_commit_super(sb, NILFS_SB_COMMIT_ALL);
	}
	up_write(&nilfs->ns_sem);

	/*
	 * Reset the range of allocatable segments last.  This order
	 * is important in the case of expansion because the secondary
	 * superblock must be protected from log write until migration
	 * completes.
	 */
	if (!ret)
		nilfs_sufile_set_alloc_range(nilfs->ns_sufile, 0, newnsegs - 1);
out:
	return ret;
}
/* * Compute all of the segment blocks * * The blocks before first segment and after last segment * are excluded.
*/
blocks = nilfs->ns_blocks_per_segment * nilfs->ns_nsegments
- nilfs->ns_first_data_block;
nrsvblocks = nilfs->ns_nrsvsegs * nilfs->ns_blocks_per_segment;
/* * Compute the overhead * * When distributing meta data blocks outside segment structure, * We must count them as the overhead.
*/
overhead = 0;
err = nilfs_count_free_blocks(nilfs, &nfreeblocks); if (unlikely(err)) return err;
err = nilfs_ifile_count_free_inodes(root->ifile,
&nmaxinodes, &nfreeinodes); if (unlikely(err)) {
nilfs_warn(sb, "failed to count free inodes: err=%d", err); if (err == -ERANGE) { /* * If nilfs_palloc_count_max_entries() returns * -ERANGE error code then we simply treat * curent inodes count as maximum possible and * zero as free inodes value.
*/
nmaxinodes = atomic64_read(&root->inodes_count);
nfreeinodes = 0;
err = 0;
} else return err;
}
/**
 * nilfs_check_feature_compatibility - check on-disk feature flags
 * @sb: super block instance
 * @sbp: on-disk superblock to validate
 *
 * Rejects the mount when the filesystem uses incompat features this
 * implementation does not support, or when a read-write mount is
 * attempted with unsupported read-only-compat features set.
 *
 * Return: 0 if compatible, %-EINVAL otherwise.
 */
int nilfs_check_feature_compatibility(struct super_block *sb,
				      struct nilfs_super_block *sbp)
{
	__u64 features;

	/* Any unsupported incompat bit forbids mounting altogether. */
	features = le64_to_cpu(sbp->s_feature_incompat) &
		~NILFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		nilfs_err(sb,
			  "couldn't mount because of unsupported optional features (%llx)",
			  (unsigned long long)features);
		return -EINVAL;
	}

	/* Unsupported RO-compat bits only forbid read-write mounts. */
	features = le64_to_cpu(sbp->s_feature_compat_ro) &
		~NILFS_FEATURE_COMPAT_RO_SUPP;
	if (!sb_rdonly(sb) && features) {
		nilfs_err(sb,
			  "couldn't mount RDWR because of unsupported optional features (%llx)",
			  (unsigned long long)features);
		return -EINVAL;
	}
	return 0;
}
down_read(&nilfs->ns_segctor_sem);
ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
up_read(&nilfs->ns_segctor_sem); if (ret < 0) {
ret = (ret == -ENOENT) ? -EINVAL : ret; goto out;
} elseif (!ret) {
nilfs_err(s, "The specified checkpoint is not a snapshot (checkpoint number=%llu)",
(unsignedlonglong)cno);
ret = -EINVAL; goto out;
}
ret = nilfs_attach_checkpoint(s, cno, false, &root); if (ret) {
nilfs_err(s, "error %d while loading snapshot (checkpoint number=%llu)",
ret, (unsignedlonglong)cno); goto out;
}
ret = nilfs_get_root_dentry(s, root, root_dentry);
nilfs_put_root(root);
out:
mutex_unlock(&nilfs->ns_snapshot_mount_mutex); return ret;
}
/** * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint * @root_dentry: root dentry of the tree to be shrunk * * Return: true if the tree was in-use, false otherwise.
*/ staticbool nilfs_tree_is_busy(struct dentry *root_dentry)
{
shrink_dcache_parent(root_dentry); return d_count(root_dentry) > 1;
}
if (cno >= nilfs_last_cno(nilfs)) returntrue; /* protect recent checkpoints */
ret = false;
root = nilfs_lookup_root(nilfs, cno); if (root) {
inode = nilfs_ilookup(sb, root, NILFS_ROOT_INO); if (inode) {
dentry = d_find_alias(inode); if (dentry) {
ret = nilfs_tree_is_busy(dentry);
dput(dentry);
}
iput(inode);
}
nilfs_put_root(root);
} return ret;
}
/** * nilfs_fill_super() - initialize a super block instance * @sb: super_block * @fc: filesystem context * * This function is called exclusively by nilfs->ns_mount_mutex. * So, the recovery process is protected from other simultaneous mounts. * * Return: 0 on success, or a negative error code on failure.
*/ staticint
nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
{ struct the_nilfs *nilfs; struct nilfs_root *fsroot; struct nilfs_fs_context *ctx = fc->fs_private;
__u64 cno; int err;
nilfs = alloc_nilfs(sb); if (!nilfs) return -ENOMEM;
sb->s_fs_info = nilfs;
err = init_nilfs(nilfs, sb); if (err) goto failed_nilfs;
/* Copy in parsed mount options */
nilfs->ns_mount_opt = ctx->ns_mount_opt;
if (!nilfs_valid_fs(nilfs)) {
nilfs_warn(sb, "couldn't remount because the filesystem is in an incomplete recovery state"); goto ignore_opts;
} if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb)) goto out; if (fc->sb_flags & SB_RDONLY) {
sb->s_flags |= SB_RDONLY;
/* * Remounting a valid RW partition RDONLY, so set * the RDONLY flag and then mark the partition as valid again.
*/
down_write(&nilfs->ns_sem);
nilfs_cleanup_super(sb);
up_write(&nilfs->ns_sem);
} else {
__u64 features; struct nilfs_root *root;
/* * Mounting a RDONLY partition read-write, so reread and * store the current valid flag. (It may have been changed * by fsck since we originally mounted the partition.)
*/
down_read(&nilfs->ns_sem);
features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
~NILFS_FEATURE_COMPAT_RO_SUPP;
up_read(&nilfs->ns_sem); if (features) {
nilfs_warn(sb, "couldn't remount RDWR because of unsupported optional features (%llx)",
(unsignedlonglong)features);
err = -EROFS; goto ignore_opts;
}
if (ctx->cno && !(fc->sb_flags & SB_RDONLY)) {
nilfs_err(NULL, "invalid option \"cp=%llu\": read-only option is not specified",
ctx->cno); return -EINVAL;
}
err = lookup_bdev(fc->source, &dev); if (err) return err;
s = sget_dev(fc, dev); if (IS_ERR(s)) return PTR_ERR(s);
if (!s->s_root) {
err = setup_bdev_super(s, fc->sb_flags, fc); if (!err)
err = nilfs_fill_super(s, fc); if (err) goto failed_super;
s->s_flags |= SB_ACTIVE;
} elseif (!ctx->cno) { if (nilfs_tree_is_busy(s->s_root)) { if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
nilfs_err(s, "the device already has a %s mount.",
sb_rdonly(s) ? "read-only" : "read/write");
err = -EBUSY; goto failed_super;
}
} else { /* * Try reconfigure to setup mount states if the current * tree is not mounted and only snapshots use this sb. * * Since nilfs_reconfigure() requires fc->root to be * set, set it first and release it on failure.
*/
fc->root = dget(s->s_root);
err = nilfs_reconfigure(fc); if (err) {
dput(fc->root);
fc->root = NULL; /* prevent double release */ goto failed_super;
} return 0;
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.