/*
 * Check whether there are new backup superblocks exist
 * in the last group. If there are some, mark them or clear
 * them in the bitmap.
 *
 * Return how many backups we find in the last group.
 *
 * NOTE(review): the previous body was a corrupted splice of several
 * unrelated functions (it referenced undeclared journal/transaction
 * state and ended with a bare "return;" in a u16 function).  This is
 * the coherent implementation matching the declared locals and the
 * contract stated above.
 */
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
				       struct ocfs2_group_desc *gd,
				       u16 cl_cpg,
				       u16 old_bg_clusters,
				       int set)
{
	int i;
	u16 backups = 0;
	u32 cluster, lgd_cluster;
	u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		/* Only backup supers that live in this (the last) group
		 * are interesting; ones before it are already handled,
		 * ones after it don't exist yet. */
		gd_blkno = ocfs2_which_cluster_group(inode, cluster);
		if (gd_blkno < lgd_blkno)
			continue;
		else if (gd_blkno > lgd_blkno)
			break;

		/* Skip backups that were already covered by the group's
		 * old size — only clusters added by this resize need
		 * their bitmap bits touched. */
		lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
		lgd_cluster += old_bg_clusters;
		if (lgd_cluster >= cluster)
			continue;

		/* Mark (set != 0) or clear the backup super's bit within
		 * this group's cluster bitmap. */
		if (set)
			ocfs2_set_bit(cluster % cl_cpg,
				      (unsigned long *)gd->bg_bitmap);
		else
			ocfs2_clear_bit(cluster % cl_cpg,
					(unsigned long *)gd->bg_bitmap);
		backups++;
	}

	return backups;
}
/* * Extend the filesystem to the new number of clusters specified. This entry * point is only used to extend the current filesystem to the end of the last * existing group.
*/ int ocfs2_group_extend(struct inode * inode, int new_clusters)
{ int ret;
handle_t *handle; struct buffer_head *main_bm_bh = NULL; struct buffer_head *group_bh = NULL; struct inode *main_bm_inode = NULL; struct ocfs2_dinode *fe = NULL; struct ocfs2_group_desc *group = NULL; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
u16 cl_bpc;
u32 first_new_cluster;
u64 lgd_blkno;
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) return -EROFS;
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT); if (!main_bm_inode) {
ret = -EINVAL;
mlog_errno(ret); goto out;
}
inode_lock(main_bm_inode);
ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1); if (ret < 0) {
mlog_errno(ret); goto out_mutex;
}
fe = (struct ocfs2_dinode *)main_bm_bh->b_data;
/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
* so any corruption is a code bug. */
BUG_ON(!OCFS2_IS_VALID_DINODE(fe));
if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
ocfs2_group_bitmap_size(osb->sb, 0,
osb->s_feature_incompat) * 8) {
mlog(ML_ERROR, "The disk is too old and small. " "Force to do offline resize.");
ret = -EINVAL; goto out_unlock;
}
handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS); if (IS_ERR(handle)) {
mlog_errno(PTR_ERR(handle));
ret = -EINVAL; goto out_unlock;
}
/* update the last group descriptor and inode. */
ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
main_bm_bh, group_bh,
first_new_cluster,
new_clusters); if (ret) {
mlog_errno(ret); goto out_commit;
}
if (cluster < total_clusters)
mlog(ML_ERROR, "add a group which is in the current volume.\n"); elseif (input->chain >= cl_count)
mlog(ML_ERROR, "input chain exceeds the limit.\n"); elseif (next_free != cl_count && next_free != input->chain)
mlog(ML_ERROR, "the add group should be in chain %u\n", next_free); elseif (total_clusters + input->clusters < total_clusters)
mlog(ML_ERROR, "add group's clusters overflow.\n"); elseif (input->clusters > cl_cpg)
mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n"); elseif (input->frees > input->clusters)
mlog(ML_ERROR, "the free cluster exceeds the total clusters\n"); elseif (total_clusters % cl_cpg != 0)
mlog(ML_ERROR, "the last group isn't full. Use group extend first.\n"); elseif (input->group != ocfs2_which_cluster_group(inode, cluster))
mlog(ML_ERROR, "group blkno is invalid\n"); elseif ((ret = ocfs2_check_new_group(inode, di, input, group_bh)))
mlog(ML_ERROR, "group descriptor check failed.\n"); else
ret = 0;
if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) return -EROFS;
main_bm_inode = ocfs2_get_system_file_inode(osb,
GLOBAL_BITMAP_SYSTEM_INODE,
OCFS2_INVALID_SLOT); if (!main_bm_inode) {
ret = -EINVAL;
mlog_errno(ret); goto out;
}
inode_lock(main_bm_inode);
ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1); if (ret < 0) {
mlog_errno(ret); goto out_mutex;
}
fe = (struct ocfs2_dinode *)main_bm_bh->b_data;
if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
ocfs2_group_bitmap_size(osb->sb, 0,
osb->s_feature_incompat) * 8) {
mlog(ML_ERROR, "The disk is too old and small." " Force to do offline resize.");
ret = -EINVAL; goto out_unlock;
}
ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh); if (ret < 0) {
mlog(ML_ERROR, "Can't read the group descriptor # %llu " "from the device.", (unsignedlonglong)input->group); goto out_unlock;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.