// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*/
/** * gfs2_llseek - seek to a location in a file * @file: the file * @offset: the offset * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END) * * SEEK_END requires the glock for the file because it references the * file's size. * * Returns: The new offset, or errno
*/
/*
 * NOTE(review): fragment — tail of the gfs2_llseek() switch. The function
 * signature and the SEEK_END case (which needs the glock, per the comment
 * above) are not visible in this chunk.
 */
case SEEK_DATA:
error = gfs2_seek_data(file, offset); break;
case SEEK_HOLE:
error = gfs2_seek_hole(file, offset); break;
case SEEK_CUR: case SEEK_SET: /* * These don't reference inode->i_size and don't depend on the * block mapping, so we don't need the glock.
*/
error = generic_file_llseek(file, offset, whence); break; default:
error = -EINVAL;
}
return error;
}
/** * gfs2_readdir - Iterator for a directory * @file: The directory to read from * @ctx: What to feed directory entries to * * Returns: errno
*/
/*
 * NOTE(review): fragment — interior of the helper that mirrors on-disk
 * GFS2_DIF_* flags into the VFS inode->i_flags. Its function header is not
 * visible here; the gfs2_readdir comment above does not belong to it.
 */
flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC); if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
flags |= S_NOSEC; if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
flags |= S_IMMUTABLE; if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
flags |= S_APPEND; if (ip->i_diskflags & GFS2_DIF_NOATIME)
flags |= S_NOATIME; if (ip->i_diskflags & GFS2_DIF_SYNC)
flags |= S_SYNC;
/* Publish the fully recomputed flag set in a single store. */
inode->i_flags = flags;
}
/* GFS2_DIF_* flags that user space is allowed to set. The mask is narrowed
 * further per file type (directories vs. regular files) at the point of use
 * below. */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA| \
GFS2_DIF_IMMUTABLE| \
GFS2_DIF_APPENDONLY| \
GFS2_DIF_NOATIME| \
GFS2_DIF_SYNC| \
GFS2_DIF_TOPDIR| \
GFS2_DIF_INHERIT_JDATA)
/** * do_gfs2_set_flags - set flags on an inode * @inode: The inode * @reqflags: The flags to set * @mask: Indicates which flags are valid *
*/ staticint do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
/*
 * NOTE(review): badly mangled fragment. `staticint` is a garbled
 * `static int`. The body below appears to interleave two different
 * functions: the start of do_gfs2_set_flags() (glock acquisition) and the
 * FS_IOC flag-translation loop of a separate fileattr/ioctl handler —
 * `i`, `fsflags` and `gfsflags` are not declared anywhere in this view.
 * Left byte-identical pending recovery of the original text.
 */
{ struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); struct buffer_head *bh; struct gfs2_holder gh; int error;
u32 new_flags, flags;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); if (error) return error;
/* Translate each FS_* flag bit into its GFS2_DIF_* counterpart; any bit
 * left over in fsflags afterwards is unsupported. */
for (i = 0; i < ARRAY_SIZE(fsflag_gfs2flag); i++) { if (fsflags & fsflag_gfs2flag[i].fsflag) {
fsflags &= ~fsflag_gfs2flag[i].fsflag;
gfsflags |= fsflag_gfs2flag[i].gfsflag;
}
} if (fsflags || gfsflags & ~GFS2_FLAGS_USER_SET) return -EINVAL;
mask = GFS2_FLAGS_USER_SET; if (S_ISDIR(inode->i_mode)) {
mask &= ~GFS2_DIF_JDATA;
} else { /* The GFS2_DIF_TOPDIR flag is only valid for directories. */ if (gfsflags & GFS2_DIF_TOPDIR) return -EINVAL;
mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
}
/** * gfs2_size_hint - Give a hint to the size of a write request * @filep: The struct file * @offset: The file offset of the write * @size: The length of the write * * When we are about to do a write, this function records the total * write size in order to provide a suitable hint to the lower layers * about how many blocks will be required. *
*/
/*
 * NOTE(review): fragment — tail of gfs2_size_hint() (see comment above).
 * The read-then-set is not atomic as a pair; presumably acceptable because
 * i_sizehint is only an allocation-size hint — confirm against callers.
 */
if (hint > atomic_read(&ip->i_sizehint))
atomic_set(&ip->i_sizehint, hint);
}
/**
 * gfs2_allocate_folio_backing - Allocate blocks for a write fault
 * @folio: The (locked) folio to allocate backing for
 * @length: Size of the allocation
 *
 * We try to allocate all the blocks required for the folio in one go.  This
 * might fail for various reasons, so we keep trying until all the blocks to
 * back this folio are allocated.  If some of the blocks are already allocated,
 * that is ok too.
 */
static int gfs2_allocate_folio_backing(struct folio *folio, size_t length)
{
	u64 offset = folio_pos(folio);

	do {
		struct iomap iomap = { };
		size_t chunk;

		if (gfs2_iomap_alloc(folio->mapping->host, offset, length,
				     &iomap))
			return -EIO;

		/* An extent may cover more than we asked for; only consume
		 * the part of it that backs this folio. */
		chunk = length < iomap.length ? length : iomap.length;
		length -= chunk;
		offset += chunk;
	} while (length > 0);

	return 0;
}
/** * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable * @vmf: The virtual memory fault containing the page to become writable * * When the page becomes writable, we need to ensure that we have * blocks allocated on disk to back that page.
*/
/*
 * NOTE(review): fragment — interior of gfs2_page_mkwrite() (see comment
 * above). The surrounding declarations and the out_trans_end /
 * out_page_locked labels are not visible in this chunk.
 */
/* Unstuff, if required, and allocate backing blocks for folio */ if (gfs2_is_stuffed(ip)) {
err = gfs2_unstuff_dinode(ip); if (err) {
ret = vmf_fs_error(err); goto out_trans_end;
}
}
folio_lock(folio); /* If truncated, we must retry the operation, we may have raced * with the glock demotion code.
*/ if (!folio_test_uptodate(folio) || folio->mapping != inode->i_mapping) {
ret = VM_FAULT_NOPAGE; goto out_page_locked;
}
/* Allocate on-disk blocks backing the (now locked) folio. */
err = gfs2_allocate_folio_backing(folio, length); if (err)
ret = vmf_fs_error(err);
/** * gfs2_mmap * @file: The file to map * @vma: The VMA which described the mapping * * There is no need to get a lock here unless we should be updating * atime. We ignore any locking errors since the only consequence is * a missed atime update (which will just be deferred until later). * * Returns: 0
*/
/** * gfs2_open_common - This is common to open and atomic_open * @inode: The inode being opened * @file: The file being opened * * This maybe called under a glock or not depending upon how it has * been called. We must always be called under a glock for regular * files, however. For other file types, it does not matter whether * we hold the glock or not. * * Returns: Error code or 0 for success
*/
int gfs2_open_common(struct inode *inode, struct file *file)
{ struct gfs2_file *fp; int ret;
if (S_ISREG(inode->i_mode)) {
ret = generic_file_open(inode, file); if (ret) return ret;
if (!gfs2_is_jdata(GFS2_I(inode)))
file->f_mode |= FMODE_CAN_ODIRECT;
}
fp = kzalloc(sizeof(struct gfs2_file), GFP_NOFS); if (!fp) return -ENOMEM;
mutex_init(&fp->f_fl_mutex);
gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
file->private_data = fp; if (file->f_mode & FMODE_WRITE) {
ret = gfs2_qa_get(GFS2_I(inode)); if (ret) goto fail;
} return 0;
/** * gfs2_open - open a file * @inode: the inode to open * @file: the struct file for this opening * * After atomic_open, this function is only used for opening files * which are already cached. We must still get the glock for regular * files to ensure that we have the file size uptodate for the large * file check which is in the common code. That is only an issue for * regular files though. * * Returns: errno
*/
/*
 * NOTE(review): fragment — body of gfs2_open() (see comment above); the
 * declarations of ip, i_gh, need_unlock and error are not visible here.
 */
if (S_ISREG(ip->i_inode.i_mode)) {
/* Shared glock so i_size is up to date for the large-file check
 * in the common open path (regular files only, per the comment). */
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
&i_gh); if (error) return error;
need_unlock = true;
}
error = gfs2_open_common(inode, file);
if (need_unlock)
gfs2_glock_dq_uninit(&i_gh);
return error;
}
/** * gfs2_release - called to close a struct file * @inode: the inode the struct file belongs to * @file: the struct file being closed * * Returns: errno
*/
/*
 * NOTE(review): fragment — tail of gfs2_release(). Only writers hold a
 * block reservation and a quota reference, so both are dropped here.
 */
if (file->f_mode & FMODE_WRITE) { if (gfs2_rs_active(&ip->i_res))
gfs2_rs_delete(ip);
gfs2_qa_put(ip);
} return 0;
}
/** * gfs2_fsync - sync the dirty data for a file (across the cluster) * @file: the file that points to the dentry * @start: the start position in the file to sync * @end: the end position in the file to sync * @datasync: set if we can ignore timestamp changes * * We split the data flushing here so that we don't wait for the data * until after we've also sent the metadata to disk. Note that for * data=ordered, we will write & wait for the data at the log flush * stage anyway, so this is unlikely to make much of a difference * except in the data=writeback case. * * If the fdatawrite fails due to any reason except -EIO, we will * continue the remainder of the fsync, although we'll still report * the error at the end. This is to match filemap_write_and_wait_range() * behaviour. * * Returns: errno
*/
/* NOTE(review): `staticint` below is a mangled `static int`. */
staticint gfs2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{ struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; int sync_state = inode->i_state & I_DIRTY; struct gfs2_inode *ip = GFS2_I(inode); int ret = 0, ret1 = 0;
if (mapping->nrpages) {
/* Kick off data writeback early; only -EIO aborts the fsync
 * immediately (see the function comment above). */
ret1 = filemap_fdatawrite_range(mapping, start, end); if (ret1 == -EIO) return ret1;
}
if (!gfs2_is_jdata(ip))
sync_state &= ~I_DIRTY_PAGES; if (datasync)
sync_state &= ~I_DIRTY_SYNC;
if (sync_state) {
ret = sync_inode_metadata(inode, 1); if (ret) return ret; if (gfs2_is_jdata(ip))
ret = file_write_and_wait(file); if (ret) return ret;
gfs2_ail_flush(ip->i_gl, 1);
}
if (mapping->nrpages)
ret = file_fdatawait_range(file, start, end);
/* NOTE(review): fragment — the final return (which should combine ret
 * with the saved ret1) and the closing brace are missing from this
 * chunk. */
/*
 * NOTE(review): fragment — interior of should_fault_in_pages().
 * `returnfalse` is a mangled `return false`; the declarations of count,
 * i, size, offs and prev_count are not visible in this chunk.
 */
if (!count) returnfalse; if (!user_backed_iter(i)) returnfalse;
/* * Try to fault in multiple pages initially. When that doesn't result * in any progress, fall back to a single page.
*/
size = PAGE_SIZE;
offs = offset_in_page(iocb->ki_pos); if (*prev_count != count) {
size_t nr_dirtied;
/* * In this function, we disable page faults when we're holding the * inode glock while doing I/O. If a page fault occurs, we indicate * that the inode glock should be dropped, fault in the pages manually, * and retry. * * Unlike generic_file_read_iter, for reads, iomap_dio_rw can trigger * physical as well as manual page faults, and we need to disable both * kinds. * * For direct I/O, gfs2 takes the inode glock in deferred mode. This * locking mode is compatible with other deferred holders, so multiple * processes and nodes can do direct I/O to a file at the same time. * There's no guarantee that reads or writes will be atomic. Any * coordination among readers and writers needs to happen externally.
*/
/*
 * NOTE(review): fragment — body of gfs2_file_direct_read(); the signature
 * and declarations (ip, gh, read, prev_count, window_size) are not
 * visible in this chunk.
 */
if (!iov_iter_count(to)) return 0; /* skip atime */
/* Deferred mode is compatible with other deferred holders, so multiple
 * processes/nodes may do direct I/O concurrently (see comment above). */
gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
retry:
ret = gfs2_glock_nq(gh); if (ret) goto out_uninit;
/* Page faults are disabled while the glock is held; -EFAULT triggers
 * the drop-lock / fault-in / retry path below. */
pagefault_disable();
to->nofault = true;
ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL,
IOMAP_DIO_PARTIAL, NULL, read);
to->nofault = false;
pagefault_enable(); if (ret <= 0 && ret != -EFAULT) goto out_unlock; /* No increment (+=) because iomap_dio_rw returns a cumulative value. */ if (ret > 0)
read = ret;
if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
gfs2_glock_dq(gh);
window_size -= fault_in_iov_iter_writeable(to, window_size); if (window_size) goto retry;
}
out_unlock: if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh); /* User space doesn't expect partial success. */ if (ret < 0) return ret; return read;
}
/* * In this function, we disable page faults when we're holding the * inode glock while doing I/O. If a page fault occurs, we indicate * that the inode glock should be dropped, fault in the pages manually, * and retry. * * For writes, iomap_dio_rw only triggers manual page faults, so we * don't need to disable physical ones.
*/
/* * Deferred lock, even if its a write, since we do no allocation on * this path. All we need to change is the atime, and this lock mode * ensures that other nodes have flushed their buffered read caches * (i.e. their page cache entries for this inode). We do not, * unfortunately, have the option of only flushing a range like the * VFS does.
*/
/*
 * NOTE(review): fragment — body of gfs2_file_direct_write(); the
 * signature and declarations (ip, gh, written, prev_count, window_size,
 * enough_retries) are not visible in this chunk.
 */
gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh);
retry:
ret = gfs2_glock_nq(gh); if (ret) goto out_uninit; /* Silently fall back to buffered I/O when writing beyond EOF */ if (iocb->ki_pos + iov_iter_count(from) > i_size_read(&ip->i_inode)) goto out_unlock;
from->nofault = true;
ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL,
IOMAP_DIO_PARTIAL, NULL, written);
from->nofault = false; if (ret <= 0) { if (ret == -ENOTBLK)
ret = 0; if (ret != -EFAULT) goto out_unlock;
} /* No increment (+=) because iomap_dio_rw returns a cumulative value. */ if (ret > 0)
written = ret;
/* Stop retrying once faulting in a single page makes no progress. */
enough_retries = prev_count == iov_iter_count(from) &&
window_size <= PAGE_SIZE; if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
gfs2_glock_dq(gh);
window_size -= fault_in_iov_iter_readable(from, window_size); if (window_size) { if (!enough_retries) goto retry; /* fall back to buffered I/O */
ret = 0;
}
}
out_unlock: if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh); /* User space doesn't expect partial success. */ if (ret < 0) return ret; return written;
}
/* * In this function, we disable page faults when we're holding the * inode glock while doing I/O. If a page fault occurs, we indicate * that the inode glock should be dropped, fault in the pages manually, * and retry.
*/
/*
 * NOTE(review): fragment — body of gfs2_file_read_iter(); the signature
 * and declarations (ip, gh, read, prev_count, window_size) are not
 * visible in this chunk, and `elseif` below is a mangled `} else if`.
 */
if (iocb->ki_flags & IOCB_DIRECT) return gfs2_file_direct_read(iocb, to, &gh);
/* First attempt: lockless page-cache read (IOCB_NOIO), no glock. */
pagefault_disable();
iocb->ki_flags |= IOCB_NOIO;
ret = generic_file_read_iter(iocb, to);
iocb->ki_flags &= ~IOCB_NOIO;
pagefault_enable(); if (ret >= 0) { if (!iov_iter_count(to)) return ret;
read = ret;
} elseif (ret != -EFAULT) { if (ret != -EAGAIN) return ret; if (iocb->ki_flags & IOCB_NOWAIT) return ret;
}
/* Slow path: take the shared glock and read again. */
ip = GFS2_I(iocb->ki_filp->f_mapping->host);
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
retry:
ret = gfs2_glock_nq(&gh); if (ret) goto out_uninit;
pagefault_disable();
ret = generic_file_read_iter(iocb, to);
pagefault_enable(); if (ret <= 0 && ret != -EFAULT) goto out_unlock; if (ret > 0)
read += ret;
if (should_fault_in_pages(to, iocb, &prev_count, &window_size)) {
/* Drop the glock, fault the user pages in manually, retry. */
gfs2_glock_dq(&gh);
window_size -= fault_in_iov_iter_writeable(to, window_size); if (window_size) goto retry;
}
out_unlock: if (gfs2_holder_queued(&gh))
gfs2_glock_dq(&gh);
out_uninit:
gfs2_holder_uninit(&gh); return read ? read : ret;
}
/* * In this function, we disable page faults when we're holding the * inode glock while doing I/O. If a page fault occurs, we indicate * that the inode glock should be dropped, fault in the pages manually, * and retry.
*/
/*
 * NOTE(review): fragment — body of gfs2_file_buffered_write(); the
 * signature and declarations (inode, sdp, ip, gh, statfs_gh, written,
 * orig_count, prev_count, window_size) are not visible in this chunk.
 */
if (inode == sdp->sd_rindex) {
/* Writes to the rindex also need the statfs inode's glock below. */
statfs_gh = kmalloc(sizeof(*statfs_gh), GFP_NOFS); if (!statfs_gh) return -ENOMEM;
}
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, gh); if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
retry:
/* Fault user pages in while the glock is NOT held. */
window_size -= fault_in_iov_iter_readable(from, window_size); if (!window_size) {
ret = -EFAULT; goto out_uninit;
}
from->count = min(from->count, window_size);
}
ret = gfs2_glock_nq(gh); if (ret) goto out_uninit;
if (inode == sdp->sd_rindex) { struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
ret = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
GL_NOCACHE, statfs_gh); if (ret) goto out_unlock;
}
pagefault_disable();
ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops,
&gfs2_iomap_write_ops, NULL);
pagefault_enable(); if (ret > 0)
written += ret;
if (inode == sdp->sd_rindex)
gfs2_glock_dq_uninit(statfs_gh);
if (ret <= 0 && ret != -EFAULT) goto out_unlock;
from->count = orig_count - written; if (should_fault_in_pages(from, iocb, &prev_count, &window_size)) {
gfs2_glock_dq(gh); goto retry;
}
out_unlock: if (gfs2_holder_queued(gh))
gfs2_glock_dq(gh);
out_uninit:
gfs2_holder_uninit(gh);
/* Presumably statfs_gh is initialized to NULL in the unseen
 * declarations, making kfree() a no-op for non-rindex writes — verify. */
kfree(statfs_gh);
from->count = orig_count - written; return written ? written : ret;
}
/** * gfs2_file_write_iter - Perform a write to a file * @iocb: The io context * @from: The data to write * * We have to do a lock/unlock here to refresh the inode size for * O_APPEND writes, otherwise we can land up writing at the wrong * offset. There is still a race, but provided the app is using its * own file locking, this will make O_APPEND work as expected. *
*/
/* * Note that under direct I/O, we don't allow any inode * timestamp updates, so we're not calling file_update_time() * here.
*/
/*
 * NOTE(review): fragment — interior of gfs2_file_write_iter(); the
 * enclosing `if (iocb->ki_flags & IOCB_DIRECT) {` whose `}` appears
 * before the `else` below, the declarations, and the out_unlock label
 * are not visible in this chunk.
 */
ret = gfs2_file_direct_write(iocb, from, &gh); if (ret < 0 || !iov_iter_count(from)) goto out_unlock;
/* Direct write fell back to buffered for the remainder: sync that part
 * so the overall write still honors O_DIRECT expectations. */
iocb->ki_flags |= IOCB_DSYNC;
buffered = gfs2_file_buffered_write(iocb, from, &gh); if (unlikely(buffered <= 0)) { if (!ret)
ret = buffered; goto out_unlock;
}
/* * We need to ensure that the page cache pages are written to * disk and invalidated to preserve the expected O_DIRECT * semantics. If the writeback or invalidate fails, only report * the direct I/O range as we don't know if the buffered pages * made it to disk.
*/
ret2 = generic_write_sync(iocb, buffered);
invalidate_mapping_pages(mapping,
(iocb->ki_pos - buffered) >> PAGE_SHIFT,
(iocb->ki_pos - 1) >> PAGE_SHIFT); if (!ret || ret2 > 0)
ret += ret2;
} else {
ret = file_update_time(file); if (ret) goto out_unlock;
ret = gfs2_file_buffered_write(iocb, from, &gh); if (likely(ret > 0))
ret = generic_write_sync(iocb, ret);
}
/*
 * NOTE(review): fragment — interior of the fallocate chunk helper; the
 * signature and declarations (ip, inode, sb, dibh, offset, end) are not
 * visible in this chunk.
 */
error = gfs2_meta_inode_buffer(ip, &dibh); if (unlikely(error)) return error;
gfs2_trans_add_meta(ip->i_gl, dibh);
if (gfs2_is_stuffed(ip)) {
error = gfs2_unstuff_dinode(ip); if (unlikely(error)) goto out;
}
while (offset < end) { struct iomap iomap = { };
error = gfs2_iomap_alloc(inode, offset, end - offset, &iomap); if (error) goto out;
offset = iomap.offset + iomap.length; if (!(iomap.flags & IOMAP_F_NEW)) continue;
/* Zero only newly-allocated extents so fallocated space reads back
 * as zeroes; pre-existing blocks are left alone. */
error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits,
iomap.length >> inode->i_blkbits,
GFP_NOFS); if (error) {
fs_err(GFS2_SB(inode), "Failed to zero data buffers\n"); goto out;
}
}
out:
brelse(dibh); return error;
}
/** * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of * blocks, determine how many bytes can be written. * @ip: The inode in question. * @len: Max cap of bytes. What we return in *len must be <= this. * @data_blocks: Compute and return the number of data blocks needed * @ind_blocks: Compute and return the number of indirect blocks needed * @max_blocks: The total blocks available to work with. * * Returns: void, but @len, @data_blocks and @ind_blocks are filled in.
*/ staticvoid calc_max_reserv(struct gfs2_inode *ip, loff_t *len, unsignedint *data_blocks, unsignedint *ind_blocks, unsignedint max_blocks)
/*
 * NOTE(review): `staticvoid`, `unsignedint` and `conststruct` here are
 * mangled `static void` / `unsigned int` / `const struct`; the remainder
 * of calc_max_reserv()'s body is missing from this chunk — only the
 * declarations survive below.
 */
{
loff_t max = *len; conststruct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); unsignedint tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);
/*
 * NOTE(review): fragment — main allocation loop of __gfs2_fallocate();
 * the declarations (len, bytes, offset, max_bytes, max_chunk_size, ap,
 * max_blks, data_blocks, ind_blocks) and the loop tail, including the
 * out_qunlock label, are not visible in this chunk.
 */
while (len > 0) { if (len < bytes)
bytes = len; if (!gfs2_write_alloc_required(ip, offset, bytes)) {
/* Already allocated — skip ahead, nothing to reserve. */
len -= bytes;
offset += bytes; continue;
}
/* We need to determine how many bytes we can actually * fallocate without exceeding quota or going over the * end of the fs. We start off optimistically by assuming
* we can write max_bytes */
max_bytes = (len > max_chunk_size) ? max_chunk_size : len;
/* Since max_bytes is most likely a theoretical max, we * calculate a more realistic 'bytes' to serve as a good * starting point for the number of bytes we may be able
* to write */
gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);
ap.target = data_blocks + ind_blocks;
error = gfs2_quota_lock_check(ip, &ap); if (error) return error; /* ap.allowed tells us how many blocks quota will allow
* us to write. Check if this reduces max_blks */
max_blks = UINT_MAX; if (ap.allowed)
max_blks = ap.allowed;
error = gfs2_inplace_reserve(ip, &ap); if (error) goto out_qunlock;
/* check if the selected rgrp limits our max_blks further */ if (ip->i_res.rs_reserved < max_blks)
max_blks = ip->i_res.rs_reserved;
/* Almost done. Calculate bytes that can be written using * max_blks. We also recompute max_bytes, data_blocks and
* ind_blocks */
calc_max_reserv(ip, &max_bytes, &data_blocks,
&ind_blocks, max_blks);
/*
 * NOTE(review): fragment — interior of gfs2_fallocate(); the signature,
 * declarations, and the out_unlock / out_uninit labels are not visible
 * in this chunk. Only PUNCH_HOLE and KEEP_SIZE modes are supported.
 */
if (mode & ~(FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE)) return -EOPNOTSUPP; /* fallocate is needed by gfs2_grow to reserve space in the rindex */ if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex) return -EOPNOTSUPP;
inode_lock(inode);
gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
ret = gfs2_glock_nq(&gh); if (ret) goto out_uninit;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
(offset + len) > inode->i_size) {
ret = inode_newsize_ok(inode, offset + len); if (ret) goto out_unlock;
}
ret = get_write_access(inode); if (ret) goto out_unlock;
if (mode & FALLOC_FL_PUNCH_HOLE) {
ret = __gfs2_punch_hole(file, offset, len);
} else {
ret = __gfs2_fallocate(file, mode, offset, len); if (ret)
/* On failure, drop any block reservation left behind. */
gfs2_rs_deltree(&ip->i_res);
}
/*
 * NOTE(review): fragment — tail of the splice-write handler; only the
 * final iter_file_splice_write() call and return survive in this chunk.
 */
ret = iter_file_splice_write(pipe, out, ppos, len, flags); return ret;
}
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
/** * gfs2_lock - acquire/release a posix lock on a file * @file: the file pointer * @cmd: either modify or retrieve lock state, possibly wait * @fl: type and range of lock * * Returns: errno
*/
/** * gfs2_flock - acquire/release a flock lock on a file * @file: the file pointer * @cmd: either modify or retrieve lock state, possibly wait * @fl: type and range of lock * * Returns: errno
*/
/* NOTE(review): `staticint` below is a mangled `static int`, and the
 * remainder of gfs2_flock() is missing from this chunk — only the
 * FL_FLOCK guard survives. */
staticint gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{ if (!(fl->c.flc_flags & FL_FLOCK)) return -ENOLCK;
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.