/*
 *  ialloc.c contains the inodes allocation and deallocation routines
 */
/*
 * The free inodes are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the block.
 */
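/*
 * Illustrative sketch (not from the original file): the bitmap layout
 * described above implies a simple mapping between an inode number and its
 * (group, bit) position, visible e.g. in ext4_mark_inode_used() below.
 * The helper names and the ipg parameter are hypothetical; the real code
 * divides by EXT4_INODES_PER_GROUP(sb) inline.
 */
#if 0	/* example only, never compiled */
static unsigned long ino_to_group(unsigned long ino, unsigned long ipg)
{
	return (ino - 1) / ipg;	/* inode numbers are 1-based */
}

static unsigned long ino_to_bit(unsigned long ino, unsigned long ipg)
{
	return (ino - 1) % ipg;	/* bit offset inside that group's bitmap */
}
#endif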
/*
 * To avoid calling the atomic setbit hundreds or thousands of times, we only
 * need to use it within a single byte (to ensure we get endianness right).
 * We can use memset for the rest of the bitmap as there are no other users.
 */
void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;

	ext4_debug("mark end bits +%d through +%d used\n", start_bit, end_bit);
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		ext4_set_bit(i, bitmap);
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
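/*
 * Illustrative sketch (not from the original file): a userspace analogue of
 * ext4_mark_bitmap_end() with plain stores instead of the atomic
 * ext4_set_bit().  With start_bit = 12 and end_bit = 32, bits 12..15 are set
 * one at a time up to the byte boundary, then the remaining two whole bytes
 * are covered by a single memset().  The function name is hypothetical.
 */
#if 0	/* example only, never compiled */
#include <string.h>

static void mark_bitmap_end_sketch(int start_bit, int end_bit, char *bitmap)
{
	int i;

	if (start_bit >= end_bit)
		return;
	/* set bits individually until i reaches the next multiple of 8 */
	for (i = start_bit; i < ((start_bit + 7) & ~7UL); i++)
		bitmap[i >> 3] |= 1 << (i & 7);
	/* then fill the remaining full bytes in one call */
	if (i < end_bit)
		memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
}
#endif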
void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
set_buffer_uptodate(bh);
set_bitmap_uptodate(bh);
}
unlock_buffer(bh);
put_bh(bh);
}
/*
 * Read the inode allocation bitmap for a given block_group, reading
 * into the specified slot in the superblock's bitmap cache.
 *
 * Return buffer_head of bitmap on success, or an ERR_PTR on error.
 */
static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct buffer_head *bh = NULL;
	ext4_fsblk_t bitmap_blk;
	int err;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return ERR_PTR(-EFSCORRUPTED);
/*
 * NOTE! When we get the inode, we're the only people
 * that have access to it, and as such there are no
 * race conditions we have to worry about. The inode
 * is not on the hash-lists, and it cannot be reached
 * through the filesystem because the directory entry
 * has been deleted earlier.
 *
 * HOWEVER: we must make sure that we get no aliases,
 * which means that we have to call "clear_inode()"
 * _before_ we mark the inode not in use in the inode
 * bitmaps. Otherwise a newly created file might use
 * the same inode number (not actually the same pointer
 * though), and then we'd have two inodes sharing the
 * same inode number and space on the harddisk.
 */
void ext4_free_inode(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	int is_directory;
	unsigned long ino;
	struct buffer_head *bitmap_bh = NULL;
	struct buffer_head *bh2;
	ext4_group_t block_group;
	unsigned long bit;
	struct ext4_group_desc *gdp;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi;
	int fatal = 0, err, count, cleared;
	struct ext4_group_info *grp;
if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
		percpu_counter_inc(&sbi->s_freeinodes_counter);
	if (sbi->s_log_groups_per_flex) {
		struct flex_groups *fg;
/*
 * Helper function for Orlov's allocator; returns critical information
 * for a particular block group or flex_bg.  If flex_size is 1, then g
 * is a block group number; otherwise it is flex_bg number.
 */
static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
			    int flex_size, struct orlov_stats *stats)
{
	struct ext4_group_desc *desc;
/*
 * Orlov's allocator for directories.
 *
 * We always try to spread first-level directories.
 *
 * If there are blockgroups with both free inodes and free clusters counts
 * not worse than average we return one with the smallest directory count.
 * Otherwise we simply return a random group.
 *
 * For the remaining directories, the rules are:
 *
 * It's OK to put a directory into a group unless
 * it has too many directories already (max_dirs) or
 * it has too few free inodes left (min_inodes) or
 * it has too few free clusters left (min_clusters).
 * Parent's group is preferred; if it doesn't satisfy these
 * conditions we search cyclically through the rest. If none
 * of the groups look good we just look for a group with more
 * free inodes than average (starting at parent's group).
 */
if (S_ISDIR(mode) &&
((parent == d_inode(sb->s_root)) ||
	    (ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
		int best_ndir = inodes_per_group;
		int ret = -1;
if (qstr) {
hinfo.hash_version = DX_HASH_HALF_MD4;
hinfo.seed = sbi->s_hash_seed;
ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
parent_group = hinfo.hash % ngroups;
} else
			parent_group = get_random_u32_below(ngroups);
		for (i = 0; i < ngroups; i++) {
g = (parent_group + i) % ngroups;
			get_orlov_stats(sb, g, flex_size, &stats);
			if (!stats.free_inodes)
				continue;
			if (stats.used_dirs >= best_ndir)
				continue;
			if (stats.free_inodes < avefreei)
				continue;
			if (stats.free_clusters < avefreec)
				continue;
grp = g;
ret = 0;
best_ndir = stats.used_dirs;
		}
		if (ret)
			goto fallback;

	found_flex_bg:
		if (flex_size == 1) {
			*group = grp;
			return 0;
		}
		/*
		 * We pack inodes at the beginning of the flexgroup's
		 * inode tables.  Block allocation decisions will do
		 * something similar, although regular files will
		 * start at 2nd block group of the flexgroup.  See
		 * ext4_ext_find_goal() and ext4_find_near().
		 */
		grp *= flex_size;
		for (i = 0; i < flex_size; i++) {
			if (grp+i >= real_ngroups)
				break;
			desc = ext4_get_group_desc(sb, grp+i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = grp+i;
				return 0;
			}
		}
		goto fallback;
	}
	/*
	 * Start looking in the flex group where we last allocated an
	 * inode for this parent directory
	 */
	if (EXT4_I(parent)->i_last_alloc_group != ~0) {
		parent_group = EXT4_I(parent)->i_last_alloc_group;
		if (flex_size > 1)
parent_group >>= sbi->s_log_groups_per_flex;
}
for (i = 0; i < ngroups; i++) {
grp = (parent_group + i) % ngroups;
		get_orlov_stats(sb, grp, flex_size, &stats);
		if (stats.used_dirs >= max_dirs)
			continue;
		if (stats.free_inodes < min_inodes)
			continue;
		if (stats.free_clusters < min_clusters)
			continue;
		goto found_flex_bg;
}
	if (avefreei) {
		/*
		 * The free-inodes counter is approximate, and for really small
		 * filesystems the above test can fail to find any blockgroups
		 */
		avefreei = 0;
		goto fallback_retry;
	}
return -1;
}
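/*
 * Illustrative sketch (not from the original file): how the thresholds
 * consumed by the loops above are typically derived.  The head of
 * find_group_orlov() that computes them is elided in this listing, so the
 * formulas below are an assumption modelled on the comment's rules: the
 * averages come straight from the filesystem-wide counters, and the min/max
 * bounds relax them by a fraction of one flex_bg's capacity.
 */
#if 0	/* example only, never compiled */
static void orlov_thresholds_sketch(unsigned long freei, unsigned long freec,
				    unsigned long ndirs, unsigned int ngroups,
				    unsigned int inodes_per_group,
				    unsigned int clusters_per_group,
				    int flex_size)
{
	unsigned long avefreei = freei / ngroups;  /* mean free inodes per group */
	unsigned long avefreec = freec / ngroups;  /* mean free clusters per group */
	unsigned long max_dirs = ndirs / ngroups +
				 inodes_per_group * flex_size / 16;
	unsigned long min_inodes = avefreei - inodes_per_group * flex_size / 4;
	unsigned long min_clusters = avefreec - clusters_per_group * flex_size / 4;

	(void)max_dirs;
	(void)min_inodes;
	(void)min_clusters;
}
#endif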
static int find_group_other(struct super_block *sb, struct inode *parent,
ext4_group_t *group, umode_t mode)
{
ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
	ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
	/*
	 * Try to place the inode in the same flex group as its
	 * parent.  If we can't find space, use the Orlov algorithm to
	 * find another flex group, and store that information in the
	 * parent directory's inode information so that we use that
	 * flex group for future allocations.
	 */
	if (flex_size > 1) {
		int retry = 0;
try_again:
parent_group &= ~(flex_size-1);
		last = parent_group + flex_size;
		if (last > ngroups)
			last = ngroups;
		for (i = parent_group; i < last; i++) {
			desc = ext4_get_group_desc(sb, i, NULL);
			if (desc && ext4_free_inodes_count(sb, desc)) {
				*group = i;
				return 0;
			}
		}
		if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
			retry = 1;
			parent_group = EXT4_I(parent)->i_last_alloc_group;
			goto try_again;
		}
		/*
		 * If this didn't work, use the Orlov search algorithm
		 * to find a new flex group; we pass in the mode to
		 * avoid the topdir algorithms.
		 */
		*group = parent_group + flex_size;
		if (*group > ngroups)
			*group = 0;
		return find_group_orlov(sb, parent, group, mode, NULL);
}
	/*
	 * Try to place the inode in its parent directory
	 */
*group = parent_group;
	desc = ext4_get_group_desc(sb, *group, NULL);
	if (desc && ext4_free_inodes_count(sb, desc) &&
	    ext4_free_group_clusters(sb, desc))
		return 0;
	/*
	 * We're going to place this inode in a different blockgroup from its
	 * parent.  We want to cause files in a common directory to all land in
	 * the same blockgroup.  But we want files which are in a different
	 * directory which shares a blockgroup with our parent to land in a
	 * different blockgroup.
	 *
	 * So add our directory's i_ino into the starting point for the hash.
	 */
*group = (*group + parent->i_ino) % ngroups;
	/*
	 * Use a quadratic hash to find a group with a free inode and some free
	 * blocks.
	 */
	for (i = 1; i < ngroups; i <<= 1) {
		*group += i;
		if (*group >= ngroups)
			*group -= ngroups;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc) &&
		    ext4_free_group_clusters(sb, desc))
			return 0;
}
	/*
	 * That failed: try linear search for a free inode, even if that group
	 * has no free blocks.
	 */
	*group = parent_group;
	for (i = 0; i < ngroups; i++) {
		if (++*group >= ngroups)
			*group = 0;
		desc = ext4_get_group_desc(sb, *group, NULL);
		if (desc && ext4_free_inodes_count(sb, desc))
			return 0;
}
return -1;
}
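/*
 * Illustrative sketch (not from the original file): the "quadratic hash"
 * in find_group_other() doubles the stride on every probe, so from a
 * starting group g it visits g+1, g+3, g+7, g+15, ... (mod ngroups).
 * A userspace demo of the probe sequence; with start = 5 and ngroups = 16
 * it prints groups 6, 8, 12 and 4.  The function name is hypothetical.
 */
#if 0	/* example only, never compiled */
#include <stdio.h>

static void quadratic_probe_sketch(unsigned int start, unsigned int ngroups)
{
	unsigned int i, group = start;

	for (i = 1; i < ngroups; i <<= 1) {
		group += i;
		if (group >= ngroups)
			group -= ngroups;
		printf("probe group %u\n", group);
	}
}
#endif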
/*
 * In no journal mode, if an inode has recently been deleted, we want
 * to avoid reusing it until we're reasonably sure the inode table
 * block has been written back to disk.  (Yes, these values are
 * somewhat arbitrary...)
 */
#define RECENTCY_MIN	60
#define RECENTCY_DIRTY	300

static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
{
	struct ext4_group_desc *gdp;
	struct ext4_inode *raw_inode;
	struct buffer_head *bh;
	int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
	int offset, ret = 0;
	int recentcy = RECENTCY_MIN;
u32 dtime, now;
	gdp = ext4_get_group_desc(sb, group, NULL);
	if (unlikely(!gdp))
		return 0;
bh = sb_find_get_block(sb, ext4_inode_table(sb, gdp) +
			       (ino / inodes_per_block));
	if (!bh || !buffer_uptodate(bh))
		/*
		 * If the block is not in the buffer cache, then it
		 * must have been written out, or, most unlikely, is
		 * being migrated - false failure should be OK here.
		 */
		goto out;
	offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
	raw_inode = (struct ext4_inode *) (bh->b_data + offset);

	/* i_dtime is only 32 bits on disk, but we only care about relative
	 * times in the range of a few minutes (i.e. long enough to sync a
	 * recently-deleted inode to disk), so using the low 32 bits of the
	 * clock (a 68 year range) is enough, see time_before32() */
dtime = le32_to_cpu(raw_inode->i_dtime);
	now = ktime_get_real_seconds();
	if (buffer_dirty(bh))
recentcy += RECENTCY_DIRTY;
if (dtime && time_before32(dtime, now) &&
time_before32(now, dtime + recentcy))
ret = 1;
out:
	brelse(bh);
	return ret;
}
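/*
 * Illustrative sketch (not from the original file): the property that
 * time_before32() relies on in recently_deleted() above.  Subtracting two
 * u32 timestamps and reading the result as signed stays correct across the
 * 2^32 wraparound as long as the times are less than ~68 years apart; for
 * instance dtime = 0xfffffff0 is still "before" now = 0x00000010.  The
 * kernel defines the real macro; this userspace rendition just shows the
 * idea.
 */
#if 0	/* example only, never compiled */
#include <stdint.h>

static int time_before32_sketch(uint32_t a, uint32_t b)
{
	/* a is before b iff the signed difference b - a is positive */
	return (int32_t)(b - a) > 0;
}
#endif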
if (check_recently_deleted && recently_deleted(sb, group, *ino)) {
recently_deleted_ino = *ino;
		*ino = *ino + 1;
		if (*ino < EXT4_INODES_PER_GROUP(sb))
			goto next;
		goto not_found;
	}
	return 1;
not_found:
	if (recently_deleted_ino >= EXT4_INODES_PER_GROUP(sb))
		return 0;
	/*
	 * Not reusing recently deleted inodes is mostly a preference. We don't
	 * want to report ENOSPC or skew allocation patterns because of that.
	 * So return even recently deleted inode if we could find better in the
	 * given range.
	 */
	*ino = recently_deleted_ino;
	return 1;
}
int ext4_mark_inode_used(struct super_block *sb, int ino)
{
	unsigned long max_ino = le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count);
	struct buffer_head *inode_bitmap_bh = NULL, *group_desc_bh = NULL;
	struct ext4_group_desc *gdp;
	ext4_group_t group;
	int bit;
	int err;

	if (ino < EXT4_FIRST_INO(sb) || ino > max_ino)
		return -EFSCORRUPTED;
group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
	inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
	if (IS_ERR(inode_bitmap_bh))
		return PTR_ERR(inode_bitmap_bh);
if (ext4_test_bit(bit, inode_bitmap_bh->b_data)) {
		err = 0;
		goto out;
}
	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp) {
		err = -EINVAL;
		goto out;
}
	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;
		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
(gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb, group, gdp));
ext4_block_bitmap_csum_set(sb, gdp, block_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
brelse(block_bitmap_bh);
if (err) {
			ext4_std_error(sb, err);
			goto out;
}
}
	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
ext4_lock_group(sb, group); /* while we modify the bg desc */
free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
free = 0;
}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (bit >= free)
ext4_itable_unused_set(sb, gdp,
(EXT4_INODES_PER_GROUP(sb) - bit - 1));
} else {
ext4_lock_group(sb, group);
}
#ifdef CONFIG_SECURITY
	{
		int num_security_xattrs = 1;
#ifdef CONFIG_INTEGRITY
		num_security_xattrs++;
#endif
		/*
		 * We assume that security xattrs are never more than 1k.
		 * In practice they are under 128 bytes.
		 */
nblocks += num_security_xattrs *
__ext4_xattr_set_credits(sb, NULL /* inode */,
					NULL /* block_bh */, 1024,
					true /* is_create */);
	}
#endif
	if (encrypt)
nblocks += __ext4_xattr_set_credits(sb,
NULL /* inode */,
NULL /* block_bh */,
					FSCRYPT_SET_CONTEXT_MAX_SIZE,
					true /* is_create */);
	return nblocks;
}
/*
 * There are two policies for allocating an inode.  If the new inode is
 * a directory, then a forward search is made for a block group with both
 * free space and a low directory-to-inode ratio; if that fails, then of
 * the groups with above-average free space, that group with the fewest
 * directories already is chosen.
 *
 * For other inodes, search forward from the parent directory's block
 * group to find a free inode.
 */
struct inode *__ext4_new_inode(struct mnt_idmap *idmap,
handle_t *handle, struct inode *dir,
			       umode_t mode, const struct qstr *qstr,
			       __u32 goal, uid_t *owner, __u32 i_flags,
			       int handle_type, unsigned int line_no,
			       int nblocks)
{
	struct super_block *sb;
	struct buffer_head *inode_bitmap_bh = NULL;
	struct buffer_head *group_desc_bh;
	ext4_group_t ngroups, group = 0;
	unsigned long ino = 0;
	struct inode *inode;
	struct ext4_group_desc *gdp = NULL;
	struct ext4_inode_info *ei;
	struct ext4_sb_info *sbi;
	int ret2, err;
	struct inode *ret;
ext4_group_t i;
	ext4_group_t flex_group;
	struct ext4_group_info *grp = NULL;
	bool encrypt = false;
	/* Cannot create files in a deleted directory */
	if (!dir || !dir->i_nlink)
		return ERR_PTR(-EPERM);
sb = dir->i_sb;
sbi = EXT4_SB(sb);
	ret2 = ext4_emergency_state(sb);
	if (unlikely(ret2))
		return ERR_PTR(ret2);
ngroups = ext4_get_groups_count(sb);
trace_ext4_request_inode(dir, mode);
	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOMEM);
ei = EXT4_I(inode);
	/*
	 * Initialize owners and quota early so that we don't have to account
	 * for quota initialization worst case in standard inode creating
	 * transaction
	 */
	if (owner) {
inode->i_mode = mode;
i_uid_write(inode, owner[0]);
i_gid_write(inode, owner[1]);
	} else if (test_opt(sb, GRPID)) {
inode->i_mode = mode;
inode_fsuid_set(inode, idmap);
inode->i_gid = dir->i_gid;
} else
inode_init_owner(idmap, inode, dir, mode);
	/*
	 * Normally we will only go through one pass of this loop,
	 * unless we get unlucky and it turns out the group we selected
	 * had its last inode grabbed by someone else.
	 */
	for (i = 0; i < ngroups; i++, ino = 0) {
err = -EIO;
		gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
		if (!gdp)
			goto out;
		/*
		 * Check free inodes count before loading bitmap.
		 */
		if (ext4_free_inodes_count(sb, gdp) == 0)
			goto next_group;
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
			grp = ext4_get_group_info(sb, group);
			/*
			 * Skip groups with already-known suspicious inode
			 * tables
			 */
			if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
				goto next_group;
}
brelse(inode_bitmap_bh);
		inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
		/* Skip groups with suspicious inode tables */
		if (IS_ERR(inode_bitmap_bh)) {
			inode_bitmap_bh = NULL;
			goto next_group;
		}
		if (!(sbi->s_mount_state & EXT4_FC_REPLAY) &&
		    EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
			goto next_group;
		ret2 = find_inode_bit(sb, group, inode_bitmap_bh, &ino);
		if (!ret2)
			goto next_group;
	/* We may have to initialize the block bitmap if it isn't already */
	if (ext4_has_group_desc_csum(sb) &&
	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		struct buffer_head *block_bitmap_bh;
		block_bitmap_bh = ext4_read_block_bitmap(sb, group);
		if (IS_ERR(block_bitmap_bh)) {
			err = PTR_ERR(block_bitmap_bh);
			goto out;
		}

		/* recheck and clear flag under lock if we still need to */
		ext4_lock_group(sb, group);
		if (ext4_has_group_desc_csum(sb) &&
(gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb, group, gdp));
ext4_block_bitmap_csum_set(sb, gdp, block_bitmap_bh);
ext4_group_desc_csum_set(sb, group, gdp);
}
ext4_unlock_group(sb, group);
brelse(block_bitmap_bh);
if (err) {
			ext4_std_error(sb, err);
			goto out;
}
}
	/* Update the relevant bg descriptor fields */
	if (ext4_has_group_desc_csum(sb)) {
		int free;
		struct ext4_group_info *grp = NULL;
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
			grp = ext4_get_group_info(sb, group);
			if (!grp) {
				err = -EFSCORRUPTED;
				goto out;
}
			down_read(&grp->alloc_sem); /*
						     * protect vs itable
						     * lazyinit
						     */
}
ext4_lock_group(sb, group); /* while we modify the bg desc */
free = EXT4_INODES_PER_GROUP(sb) -
			ext4_itable_unused_count(sb, gdp);
		if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_INODE_UNINIT);
free = 0;
		}
		/*
		 * Check the relative inode number against the last used
		 * relative inode number in this group.  If it is greater
		 * we need to update the bg_itable_unused count.
		 */
		if (ino > free)
ext4_itable_unused_set(sb, gdp,
					(EXT4_INODES_PER_GROUP(sb) - ino));
		if (!(sbi->s_mount_state & EXT4_FC_REPLAY))
up_read(&grp->alloc_sem);
} else {
ext4_lock_group(sb, group);
}
	ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
	if (S_ISDIR(mode)) {
		ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
		if (sbi->s_log_groups_per_flex) {
ext4_group_t f = ext4_flex_group(sbi, group);
	inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
	/* This is the optimal IO size (for stat), not the fs block size */
inode->i_blocks = 0;
simple_inode_init_ts(inode);
ei->i_crtime = inode_get_mtime(inode);
ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
ext4_set_inode_state(inode, EXT4_STATE_NEW);
ei->i_extra_isize = sbi->s_want_extra_isize;
	ei->i_inline_off = 0;
	if (ext4_has_feature_inline_data(sb) &&
(!(ei->i_flags & (EXT4_DAX_FL|EXT4_EA_INODE_FL)) || S_ISDIR(mode)))
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
ret = inode;
	err = dquot_alloc_inode(inode);
	if (err)
		goto fail_drop;
	/*
	 * Since the encryption xattr will always be unique, create it first so
	 * that it's less likely to end up in an external xattr block and
	 * prevent its deduplication.
	 */
	if (encrypt) {
		err = fscrypt_set_context(inode, handle);
		if (err)
			goto fail_free_drop;
}
if (!(ei->i_flags & EXT4_EA_INODE_FL)) {
		err = ext4_init_acl(handle, inode, dir);
		if (err)
			goto fail_free_drop;
	if (ext4_has_feature_extents(sb)) {
		/* set extent flag only for directory, file and normal symlink */
		if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
ext4_ext_tree_init(handle, inode);
}
}
	/* Having the inode bit set should be a 100% indicator that this
	 * is a valid orphan (no e2fsck run on fs).  Orphans also include
	 * inodes that were being truncated, so we can't check i_nlink==0.
	 */
	if (!ext4_test_bit(bit, bitmap_bh->b_data))
		goto bad_orphan;
	/*
	 * If the orphan has i_nlink > 0 then it should be able to
	 * be truncated, otherwise it won't be removed from the orphan
	 * list during processing and an infinite loop will result.
	 * Similarly, it must not be a bad inode.
	 */
	if ((inode->i_nlink && !ext4_can_truncate(inode)) ||
	    is_bad_inode(inode))
		goto bad_orphan;
	if (NEXT_ORPHAN(inode) > max_ino)
		goto bad_orphan;
	brelse(bitmap_bh);
	return inode;
bad_orphan:
ext4_error(sb, "bad orphan inode %lu", ino); if (bitmap_bh)
printk(KERN_ERR "ext4_test_bit(bit=%d, block=%llu) = %d\n",
		       bit, (unsigned long long)bitmap_bh->b_blocknr,
		       ext4_test_bit(bit, bitmap_bh->b_data));
	if (inode) {
printk(KERN_ERR "is_bad_inode(inode)=%d\n",
is_bad_inode(inode));
printk(KERN_ERR "NEXT_ORPHAN(inode)=%u\n",
NEXT_ORPHAN(inode));
printk(KERN_ERR "max_ino=%lu\n", max_ino);
printk(KERN_ERR "i_nlink=%u\n", inode->i_nlink); /* Avoid freeing blocks if we got a bad deleted inode */ if (inode->i_nlink == 0)
inode->i_blocks = 0;
iput(inode);
}
	brelse(bitmap_bh);
	return ERR_PTR(err);
}
unsigned long ext4_count_free_inodes(struct super_block *sb)
{
	unsigned long desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i, ngroups = ext4_get_groups_count(sb);
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	unsigned long bitmap_count, x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
desc_count = 0;
bitmap_count = 0;
	gdp = NULL;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
desc_count += ext4_free_inodes_count(sb, gdp);
brelse(bitmap_bh);
		bitmap_bh = ext4_read_inode_bitmap(sb, i);
		if (IS_ERR(bitmap_bh)) {
			bitmap_bh = NULL;
			continue;
}
x = ext4_count_free(bitmap_bh->b_data,
EXT4_INODES_PER_GROUP(sb) / 8);
printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
		       (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
bitmap_count += x;
}
brelse(bitmap_bh);
printk(KERN_DEBUG "ext4_count_free_inodes: " "stored = %u, computed = %lu, %lu\n",
	       le32_to_cpu(es->s_free_inodes_count), desc_count, bitmap_count);
	return desc_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
desc_count += ext4_free_inodes_count(sb, gdp);
cond_resched();
	}
	return desc_count;
#endif
}
/* Called at mount-time, super-block is locked */
unsigned long ext4_count_dirs(struct super_block * sb)
{
	unsigned long count = 0;
ext4_group_t i, ngroups = ext4_get_groups_count(sb);
	for (i = 0; i < ngroups; i++) {
		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
count += ext4_used_dirs_count(sb, gdp);
	}
	return count;
}
/*
 * Zeroes not yet zeroed inode table - just write zeroes through the whole
 * inode table.  Must be called without any spinlock held.  The only place
 * where it is called from on active part of filesystem is ext4lazyinit
 * thread, so we do not need any special locks, however we have to prevent
 * inode allocation from the current group, so we take alloc_sem lock, to
 * block ext4_new_inode() until we are finished.
 */
int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
			  int barrier)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_group_desc *gdp = NULL;
	struct buffer_head *group_desc_bh;
handle_t *handle;
	ext4_fsblk_t blk;
	int num, ret = 0, used_blks = 0;
	unsigned long used_inos = 0;
	gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
	if (!gdp || !grp)
		goto out;
	/*
	 * We do not need to lock this, because we are the only one
	 * handling this flag.
	 */
	if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))
		goto out;
	handle = ext4_journal_start_sb(sb, EXT4_HT_MISC, 1);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
}
	down_write(&grp->alloc_sem);
	/*
	 * If inode bitmap was already initialized there may be some
	 * used inodes so we need to skip blocks with used inodes in
	 * inode table.
	 */
	if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
used_inos = EXT4_INODES_PER_GROUP(sb) -
ext4_itable_unused_count(sb, gdp);
used_blks = DIV_ROUND_UP(used_inos, sbi->s_inodes_per_block);
		/* Bogus inode unused count? */
		if (used_blks < 0 || used_blks > sbi->s_itb_per_group) {
ext4_error(sb, "Something is wrong with group %u: " "used itable blocks: %d; " "itable unused count: %u",
group, used_blks,
ext4_itable_unused_count(sb, gdp));
			ret = 1;
			goto err_out;
}
		used_inos += group * EXT4_INODES_PER_GROUP(sb);
		/*
		 * Are there some uninitialized inodes in the inode table
		 * before the first normal inode?
		 */
		if ((used_blks != sbi->s_itb_per_group) &&
(used_inos < EXT4_FIRST_INO(sb))) {
ext4_error(sb, "Something is wrong with group %u: " "itable unused count: %u; " "itables initialized count: %ld",
group, ext4_itable_unused_count(sb, gdp),
used_inos);
			ret = 1;
			goto err_out;
}
}
BUFFER_TRACE(group_desc_bh, "get_write_access");
ret = ext4_journal_get_write_access(handle, sb, group_desc_bh,
					    EXT4_JTR_NONE);
	if (ret)
		goto err_out;
	blk = ext4_inode_table(sb, gdp) + used_blks;
	num = sbi->s_itb_per_group - used_blks;

	/*
	 * Skip zeroout if the inode table is full.  But we set the ZEROED
	 * flag anyway, because obviously, when it is full it does not need
	 * further zeroing.
	 */
	if (unlikely(num == 0))
		goto skip_zeroout;
ext4_debug("going to zero out inode table in group %d\n",
group);
	ret = sb_issue_zeroout(sb, blk, num, GFP_NOFS);
	if (ret < 0)
		goto err_out;
	if (barrier)
blkdev_issue_flush(sb->s_bdev);
skip_zeroout:
	ext4_lock_group(sb, group);
	gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
	ext4_group_desc_csum_set(sb, group, gdp);
	ext4_unlock_group(sb, group);

	BUFFER_TRACE(group_desc_bh, "call ext4_handle_dirty_metadata");
	ret = ext4_handle_dirty_metadata(handle, NULL, group_desc_bh);

err_out:
	up_write(&grp->alloc_sem);
	ext4_journal_stop(handle);
out:
	return ret;
}