/*
 * Ensure that we do not have any outstanding pNFS layouts that can be used by
 * clients to directly read from or write to this inode.  This must be called
 * before every operation that can remove blocks from the extent map.
 * Additionally we call it during the write operation, where we aren't
 * concerned about exposing unallocated blocks but just want to provide basic
 * synchronization between a local writer and pNFS clients.  mmap writes would
 * also benefit from this sort of synchronization, but due to the tricky
 * locking rules in the page fault path we don't bother.
 */
int
xfs_break_leased_layouts( struct inode *inode,
uint *iolock, bool *did_unlock)
{ struct xfs_inode *ip = XFS_I(inode); int error;
/*
 * NOTE(review): the remainder of this function body (presumably the
 * break_layout() retry loop and the closing brace) is missing from this
 * chunk — the source appears truncated here.  Restore from the original
 * file before building.
 */
/*
 * Get a unique ID including its location so that the client can identify
 * the exported device.
 *
 * @sb:     superblock of the exported filesystem
 * @buf:    output buffer for the UUID bytes
 * @len:    in/out length of @buf
 * @offset: output location identifier for the exported device
 */
int
xfs_fs_get_uuid( struct super_block *sb,
u8 *buf,
u32 *len,
u64 *offset)
{ struct xfs_mount *mp = XFS_M(sb);
/*
 * NOTE(review): function body truncated here — the code that copies the
 * filesystem UUID into @buf and the closing brace are missing from this
 * chunk.  Restore from the original file.
 */
/*
 * We cannot use file based VFS helpers such as file_modified() to update
 * inode state as we modify the data/metadata in the inode here.  Hence we
 * have to open code the timestamp updates and SUID/SGID stripping.  We also
 * need to set the inode prealloc flag to ensure that the extents we allocate
 * are not removed if the inode is reclaimed from memory before
 * xfs_fs_block_commit() is called from the client to indicate that data has
 * been written and the file size can be extended.
 */
/*
 * NOTE(review): "staticint" below is a garbled "static int" — the two
 * tokens were fused during extraction and must be separated for this to
 * compile.  Left byte-identical here because the function body is also
 * truncated (transaction allocation, timestamp/SUID handling, commit and
 * closing brace are all missing); restore both from the original file.
 */
staticint
xfs_fs_map_update_inode( struct xfs_inode *ip)
{ struct xfs_trans *tp; int error;
/*
 * Get a layout for the pNFS client.
 *
 * @inode:              inode to map blocks for
 * @offset:             byte offset of the requested range
 * @length:             byte length of the requested range
 * @iomap:              output mapping handed to the client
 * @write:              true for a write layout, false for a read layout
 * @device_generation:  output device generation number
 *
 * Returns 0 on success or a negative errno (-EIO on shutdown, -ENXIO for
 * unsupported inodes, -EINVAL for out-of-range offsets).
 */
int
xfs_fs_map_blocks( struct inode *inode,
loff_t offset,
u64 length, struct iomap *iomap, bool write,
u32 *device_generation)
{ struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; struct xfs_bmbt_irec imap;
xfs_fileoff_t offset_fsb, end_fsb;
loff_t limit; int bmapi_flags = XFS_BMAPI_ENTIRE; int nimaps = 1;
uint lock_flags; int error = 0;
u64 seq;
/* Nothing to hand out on a shut-down filesystem. */
if (xfs_is_shutdown(mp)) return -EIO;
/*
 * We can't export inodes residing on the realtime device.  The realtime
 * device doesn't have a UUID to identify it, so the client has no way
 * to find it.
 */
if (XFS_IS_REALTIME_INODE(ip)) return -ENXIO;
/*
 * The pNFS block layout spec actually supports reflink like
 * functionality, but the Linux pNFS server doesn't implement it yet.
 */
if (xfs_is_reflink_inode(ip)) return -ENXIO;
/*
 * Lock out any other I/O before we flush and invalidate the pagecache,
 * and then hand out a layout to the remote system.  This is very
 * similar to direct I/O, except that the synchronization is much more
 * complicated.  See the comment near xfs_break_leased_layouts
 * for a detailed explanation.
 */
xfs_ilock(ip, XFS_IOLOCK_EXCL);
error = -EINVAL;
/*
 * Clamp the requested range against the file/filesystem size limit.
 * For reads the limit is extended to the block-rounded i_size so short
 * files can still be mapped; out-of-range offsets fail with -EINVAL.
 */
limit = mp->m_super->s_maxbytes; if (!write)
limit = max(limit, round_up(i_size_read(inode),
inode->i_sb->s_blocksize)); if (offset > limit) goto out_unlock; if (offset > limit - length)
length = limit - offset;
/* Flush dirty pagecache and invalidate it before handing blocks out. */
error = filemap_write_and_wait(inode->i_mapping); if (error) goto out_unlock;
error = invalidate_inode_pages2(inode->i_mapping); if (WARN_ON_ONCE(error)) goto out_unlock;
/*
 * NOTE(review): the block-mapping/allocation code that should sit here
 * (declared locals imap, offset_fsb/end_fsb, bmapi_flags, nimaps,
 * lock_flags and seq are all unused in the visible text) appears to have
 * been lost in truncation — restore from the original file.
 */
/*
 * Ensure the next transaction is committed synchronously so
 * that the blocks allocated and handed out to the client are
 * guaranteed to be present even after a server crash.
 */
error = xfs_fs_map_update_inode(ip); if (!error)
error = xfs_log_force_inode(ip); if (error) goto out_unlock;
/*
 * NOTE(review): function tail truncated — the out_unlock label this
 * function jumps to, the iomap conversion and the return statement are
 * missing from this chunk.
 */
/*
 * Make sure the blocks described by maps are stable on disk.  This includes
 * converting any unwritten extents, flushing the disk cache and updating the
 * time stamps.
 *
 * Note that we rely on the caller to always send us a timestamp update so
 * that we always commit a transaction here.  If that stops being true we
 * will have to manually flush the cache here similar to what the fsync code
 * path does for datasyncs on files that have no dirty metadata.
 */
int
xfs_fs_commit_blocks( struct inode *inode, struct iomap *maps, int nr_maps, struct iattr *iattr)
{ struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; struct xfs_trans *tp; bool update_isize = false; int error, i;
loff_t size;
/*
 * NOTE(review): `size` is read below before any visible initialization,
 * and `update_isize` is set but never consumed in the visible text —
 * presumably an assignment such as `size = i_size_read(inode)` plus the
 * iattr->ia_size handling was lost in truncation.  Confirm against the
 * original file.
 */
for (i = 0; i < nr_maps; i++) {
u64 start, length, end;
/* Clamp each map to [0, size); skip maps entirely past EOF or empty. */
start = maps[i].offset; if (start > size) continue;
end = start + maps[i].length; if (end > size)
end = size;
length = end - start; if (!length) continue;
/*
 * Make sure reads through the pagecache see the new data.
 */
error = invalidate_inode_pages2_range(inode->i_mapping,
start >> PAGE_SHIFT,
(end - 1) >> PAGE_SHIFT);
WARN_ON_ONCE(error);
/*
 * NOTE(review): truncated here — the unwritten-extent conversion inside
 * the loop, the loop's closing brace, the transaction that applies
 * @iattr, and the function's return are all missing from this chunk.
 */
/*
 * NOTE(review): the following is extraneous scraped web-page boilerplate,
 * not part of the source code, and was bare (non-comment) text that broke
 * compilation.  Original German disclaimer, translated: "The information on
 * this web page was carefully compiled to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * provided information is guaranteed.  Note: the colored syntax display and
 * the measurement are still experimental."  Remove this block entirely when
 * restoring the file.
 */