inode_lock(inode);
ret = generic_write_checks(iocb, from); if (ret <= 0) goto out_unlock;
ret = file_remove_privs(file); if (ret) goto out_unlock;
ret = file_update_time(file); if (ret) goto out_unlock;
ret = dax_iomap_rw(iocb, from, &ext2_iomap_ops); if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
i_size_write(inode, iocb->ki_pos);
mark_inode_dirty(inode);
}
out_unlock:
inode_unlock(inode); if (ret > 0)
ret = generic_write_sync(iocb, ret); return ret;
}
/*
 * Lock ordering for the ext2 DAX fault paths:
 *
 *   mmap_lock (MM)
 *     sb_start_pagefault (vfs, freeze)
 *       address_space->invalidate_lock
 *         address_space->i_mmap_rwsem or page_lock (mutually exclusive in DAX)
 *           ext2_inode_info->truncate_mutex
 *
 * The default page_lock and i_size verification done by non-DAX fault paths
 * is sufficient because ext2 doesn't support hole punching.
 */
static vm_fault_t ext2_dax_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	bool write_fault = (vmf->flags & FAULT_FLAG_WRITE) &&
			   (vmf->vma->vm_flags & VM_SHARED);
	vm_fault_t ret;

	/*
	 * Writable shared faults may dirty the inode: block filesystem
	 * freezing and bump the file times before taking any fault locks.
	 */
	if (write_fault) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vmf->vma->vm_file);
	}

	/* Shared invalidate_lock serializes against truncation/invalidation. */
	filemap_invalidate_lock_shared(inode->i_mapping);
	ret = dax_iomap_fault(vmf, 0, NULL, NULL, &ext2_iomap_ops);
	filemap_invalidate_unlock_shared(inode->i_mapping);

	if (write_fault)
		sb_end_pagefault(inode->i_sb);

	return ret;
}
staticconststruct vm_operations_struct ext2_dax_vm_ops = {
.fault = ext2_dax_fault, /* * .huge_fault is not supported for DAX because allocation in ext2 * cannot be reliably aligned to huge page sizes and so pmd faults * will always fail and fail back to regular faults.
*/
.page_mkwrite = ext2_dax_fault,
.pfn_mkwrite = ext2_dax_fault,
};
/*
 * Called when filp is released.  This happens when all file descriptors
 * for a single struct file are closed.  Note that different open() calls
 * for the same file yield different struct file structures.
 */
static int ext2_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * Drop the block reservation when the last writable reference goes
	 * away; truncate_mutex serializes this against the allocator.
	 */
	if (filp->f_mode & FMODE_WRITE) {
		mutex_lock(&EXT2_I(inode)->truncate_mutex);
		ext2_discard_reservation(inode);
		mutex_unlock(&EXT2_I(inode)->truncate_mutex);
	}
	return 0;
}
/*
 * fsync for ext2: flush the buffers backing [start, end] and report a
 * filesystem-level error if writing metadata buffers failed.
 */
int ext2_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct super_block *sb = file->f_mapping->host->i_sb;
	int err = generic_buffers_fsync(file, start, end, datasync);

	if (err == -EIO)
		/* We don't really know where the IO error happened... */
		ext2_error(sb, __func__,
			   "detected IO error when writing metadata buffers");

	return err;
}
trace_ext2_dio_read_begin(iocb, to, 0);
inode_lock_shared(inode);
ret = iomap_dio_rw(iocb, to, &ext2_iomap_ops, NULL, 0, NULL, 0);
inode_unlock_shared(inode);
trace_ext2_dio_read_end(iocb, to, ret);
/* * If we are extending the file, we have to update i_size here before * page cache gets invalidated in iomap_dio_rw(). This prevents racing * buffered reads from zeroing out too much from page cache pages. * Note that all extending writes always happens synchronously with * inode lock held by ext2_dio_write_iter(). So it is safe to update * inode size here for extending file writes.
*/
pos += size; if (pos > i_size_read(inode)) {
i_size_write(inode, pos);
mark_inode_dirty(inode);
}
out:
trace_ext2_dio_write_endio(iocb, size, error); return error;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.