/*
 * NOTE(review): tail of a block-accounting helper whose signature lies
 * before this chunk (presumably nilfs_inode_sub_blocks — confirm against
 * the full source). It subtracts n blocks' worth of bytes from the inode
 * and, when a root object is given, n from its 64-bit block counter.
 */
inode_sub_bytes(inode, i_blocksize(inode) * n); if (root)
atomic64_sub(n, &root->blocks_count);
}
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether allocating the block or not when it has not
 *          been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	/* Look up a contiguous run of blocks under the DAT semaphore. */
	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				nilfs_warn(inode->i_sb,
					   "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
					   __func__, inode->i_ino,
					   (unsigned long long)blkoff);
				err = -EAGAIN;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
/**
 * nilfs_read_folio() - implement read_folio() method of nilfs_aops
 * address_space_operations.
 * @file: file struct of the file to be read
 * @folio: the folio to be read
 *
 * Return: 0 on success, or a negative error code on failure.
 */
static int nilfs_read_folio(struct file *file, struct folio *folio)
{
	/* Delegate to the generic mpage reader using our block-mapping callback. */
	return mpage_read_folio(folio, nilfs_get_block);
}
/*
 * NOTE(review): the lines below are fragments; their enclosing function
 * headers are not visible in this chunk, so the code cannot be safely
 * rewritten here.
 */
/* * The page may not be locked, eg if called from try_to_unmap_one()
 */
spin_lock(&mapping->i_private_lock);
head = folio_buffers(folio); if (head) { struct buffer_head *bh = head;
do { /* Do not mark hole blocks dirty */ if (buffer_dirty(bh) || !buffer_mapped(bh)) continue;
/*
 * NOTE(review): what follows appears to be the success/error tail of an
 * inode-creation function (presumably nilfs_new_inode) — confirm against
 * the full source. On ACL-init failure the new inode is unwound: nlink
 * cleared, unlocked if still I_NEW, and released via iput().
 */
err = nilfs_init_acl(inode, dir); if (unlikely(err)) /* * Never occur. When supporting nilfs_init_acl(), * proper cancellation of above jobs should be considered.
 */ goto failed_after_creation;
return inode;
failed_after_creation:
clear_nlink(inode); if (inode->i_state & I_NEW)
unlock_new_inode(inode);
iput(inode); /* * raw_inode will be deleted through * nilfs_evict_inode().
 */ goto failed;
/** * nilfs_attach_btree_node_cache - attach a B-tree node cache to the inode * @inode: inode object * * nilfs_attach_btree_node_cache() attaches a B-tree node cache to @inode, * or does nothing if the inode already has it. This function allocates * an additional inode to maintain page cache of B-tree nodes one-on-one. * * Return: 0 on success, or %-ENOMEM if memory is insufficient.
 */ int nilfs_attach_btree_node_cache(struct inode *inode)
{ struct nilfs_inode_info *ii = NILFS_I(inode); struct inode *btnc_inode; struct nilfs_iget_args args;
/* NOTE(review): body of nilfs_attach_btree_node_cache is truncated here. */
/** * nilfs_detach_btree_node_cache - detach the B-tree node cache from the inode * @inode: inode object * * nilfs_detach_btree_node_cache() detaches the B-tree node cache and its * holder inode bound to @inode, or does nothing if @inode doesn't have it.
 */ void nilfs_detach_btree_node_cache(struct inode *inode)
{ struct nilfs_inode_info *ii = NILFS_I(inode); struct inode *btnc_inode = ii->i_assoc_inode;
/* NOTE(review): body of nilfs_detach_btree_node_cache is truncated here. */
/** * nilfs_iget_for_shadow - obtain inode for shadow mapping * @inode: inode object that uses shadow mapping * * nilfs_iget_for_shadow() allocates a pair of inodes that holds page * caches for shadow mapping. The page cache for data pages is set up * in one inode and the one for b-tree node pages is set up in the * other inode, which is attached to the former inode. * * Return: a pointer to the inode for data pages on success, or %-ENOMEM * if memory is insufficient.
 */ struct inode *nilfs_iget_for_shadow(struct inode *inode)
{ struct nilfs_iget_args args = {
.ino = inode->i_ino, .root = NULL, .cno = 0,
.type = NILFS_I_TYPE_SHADOW
}; struct inode *s_inode; int err;
s_inode = iget5_locked(inode->i_sb, inode->i_ino, nilfs_iget_test,
nilfs_iget_set, &args); if (unlikely(!s_inode)) return ERR_PTR(-ENOMEM); if (!(s_inode->i_state & I_NEW)) return inode;
/* NOTE(review): body of nilfs_iget_for_shadow is truncated here. */
/** * nilfs_write_inode_common - export common inode information to on-disk inode * @inode: inode object * @raw_inode: on-disk inode * * This function writes standard information from the on-memory inode @inode * to @raw_inode on ifile, cpfile or a super root block. Since inode bmap * data is not exported, nilfs_bmap_write() must be called separately during * log writing.
 */ void nilfs_write_inode_common(struct inode *inode, struct nilfs_inode *raw_inode)
{ struct nilfs_inode_info *ii = NILFS_I(inode);
if (!test_bit(NILFS_I_BMAP, &ii->i_state)) return;
/*
 * NOTE(review): nilfs_write_inode_common is truncated above. The retry
 * loop below belongs to a different, headerless fragment — it truncates
 * the bmap in bounded steps (NILFS_MAX_TRUNCATE_BLOCKS at a time),
 * relaxing lock pressure between steps and retrying once on -ENOMEM.
 */
repeat:
ret = nilfs_bmap_last_key(ii->i_bmap, &b); if (ret == -ENOENT) return; elseif (ret < 0) goto failed;
if (b < from) return;
b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
ret = nilfs_bmap_truncate(ii->i_bmap, b);
nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb); if (!ret || (ret == -ENOMEM &&
nilfs_bmap_truncate(ii->i_bmap, b) == 0)) goto repeat;
/*
 * NOTE(review): the lines below look like the tail of a truncate
 * operation (timestamp update, sync flag, dirtying, commit) — confirm
 * against the full source.
 */
inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode)); if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
nilfs_mark_inode_dirty(inode);
nilfs_set_file_dirty(inode, 0);
nilfs_transaction_commit(sb); /* * May construct a logical segment and may fail in sync mode. * But truncate has no return value.
 */
}
/*
 * NOTE(review): headerless fragment of the inode-eviction path
 * (presumably nilfs_evict_inode — confirm against the full source).
 * Live or rootless/bad inodes are simply cleared; otherwise the inode's
 * blocks are truncated and its ifile entry deleted under a transaction.
 */
if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
nilfs_clear_inode(inode); return;
}
nilfs_transaction_begin(sb, &ti, 0); /* never fails */
truncate_inode_pages_final(&inode->i_data);
nilfs = sb->s_fs_info; if (unlikely(sb_rdonly(sb) || !nilfs->ns_writer)) { /* * If this inode is about to be disposed after the file system * has been degraded to read-only due to file system corruption * or after the writer has been detached, do not make any * changes that cause writes, just clear it. * Do this check after read-locking ns_segctor_sem by * nilfs_transaction_begin() in order to avoid a race with * the writer detach operation.
 */
clear_inode(inode);
nilfs_clear_inode(inode);
nilfs_transaction_abort(sb); return;
}
/* TODO: some of the following operations may fail. */
nilfs_truncate_bmap(ii, 0);
nilfs_mark_inode_dirty(inode);
clear_inode(inode);
ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino); if (!ret)
atomic64_dec(&ii->i_root->inodes_count);
nilfs_clear_inode(inode);
if (IS_SYNC(inode))
nilfs_set_transaction_flag(NILFS_TI_SYNC);
nilfs_transaction_commit(sb); /* * May construct a logical segment and may fail in sync mode. * But delete_inode has no return value.
 */
}
/*
 * NOTE(review): tail of a set-file-dirty function whose header lies
 * before this chunk. It queues the inode on ns_dirty_files under
 * ns_inode_lock, taking an igrab() reference unless the inode is
 * already being freed.
 */
if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state)) return 0;
spin_lock(&nilfs->ns_inode_lock); if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
!test_bit(NILFS_I_BUSY, &ii->i_state)) { /* * Because this routine may race with nilfs_dispose_list(), * we have to check NILFS_I_QUEUED here, too.
 */ if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) { /* * This will happen when somebody is freeing * this inode.
 */
nilfs_warn(inode->i_sb, "cannot set file dirty (ino=%lu): the file is being freed",
inode->i_ino);
spin_unlock(&nilfs->ns_inode_lock); return -EINVAL; /* * NILFS_I_DIRTY may remain for * freeing inode.
 */
}
list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
set_bit(NILFS_I_QUEUED, &ii->i_state);
}
spin_unlock(&nilfs->ns_inode_lock); return 0;
}
/* NOTE(review): __nilfs_mark_inode_dirty is truncated below this point. */
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{ struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct buffer_head *ibh; int err;
/* * Do not dirty inodes after the log writer has been detached * and its nilfs_root struct has been freed.
 */ if (unlikely(nilfs_purging(nilfs))) return 0;
/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/* Refuse to dirty a bad inode; log and bail out. */
	if (is_bad_inode(inode)) {
		nilfs_warn(inode->i_sb,
			   "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}

	/* Metadata-file inodes use the MDT dirty-marking path instead. */
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}

	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
/*
 * NOTE(review): headerless fragment of a fiemap implementation
 * (presumably nilfs_fiemap — confirm against the full source). It
 * flushes the extent accumulated so far, bounds the next lookup so it
 * does not run into a delayed-allocation extent, and queries the bmap
 * for a contiguous run under the DAT semaphore.
 */
if (delalloc_blklen && blkoff == delalloc_blkoff) { if (size) { /* End of the current extent */
ret = fiemap_fill_next_extent(
fieinfo, logical, phys, size, flags); if (ret) break;
} if (blkoff > end_blkoff) break;
/* * Limit the number of blocks that we look up so as * not to get into the next delayed allocation extent.
 */
maxblocks = INT_MAX; if (delalloc_blklen)
maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
maxblocks);
blkphy = 0;
down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
n = nilfs_bmap_lookup_contig(
NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
/*
 * The information on this web page has been compiled carefully and to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the syntax highlighting and the measurement are still experimental.
 */