sf_handle = kmalloc(sizeof(*sf_handle), GFP_KERNEL); if (!sf_handle) return ERR_PTR(-ENOMEM);
/* the host may have given us different attr then requested */
sf_i->force_restat = 1;
/* init our handle struct and add it to the inode's handles list */
sf_handle->handle = handle;
sf_handle->root = VBOXSF_SBI(inode->i_sb)->root;
sf_handle->access_flags = access_flags;
kref_init(&sf_handle->refcount);
/* * We check the value of params.handle afterwards to find out if * the call succeeded or failed, as the API does not seem to cleanly * distinguish error and informational messages. * * Furthermore, we must set params.handle to SHFL_HANDLE_NIL to * make the shared folders host service use our mode parameter.
*/
params.handle = SHFL_HANDLE_NIL; if (file->f_flags & O_CREAT) {
params.create_flags |= SHFL_CF_ACT_CREATE_IF_NEW; /* * We ignore O_EXCL, as the Linux kernel seems to call create * beforehand itself, so O_EXCL should always fail.
*/ if (file->f_flags & O_TRUNC)
params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS; else
params.create_flags |= SHFL_CF_ACT_OPEN_IF_EXISTS;
} else {
params.create_flags |= SHFL_CF_ACT_FAIL_IF_NEW; if (file->f_flags & O_TRUNC)
params.create_flags |= SHFL_CF_ACT_OVERWRITE_IF_EXISTS;
}
staticint vboxsf_file_release(struct inode *inode, struct file *file)
{ /* * When a file is closed on our (the guest) side, we want any subsequent * accesses done on the host side to see all changes done from our side.
*/
filemap_write_and_wait(inode->i_mapping);
/*
 * Write back dirty pages now, because there may not be any suitable
 * open files later
 */
static void vboxsf_vma_close(struct vm_area_struct *vma)
{
	/* Flush any dirty pagecache data for the mapped file to the host */
	filemap_write_and_wait(vma->vm_file->f_mapping);
}
/*
 * ->mmap_prepare() for regular files: do the generic pagecache mmap
 * setup, then install our vm_ops so dirty pages get written back to
 * the host when the VMA is closed (see vboxsf_vma_close()).
 *
 * Returns 0 on success or the error from generic_file_mmap_prepare().
 */
static int vboxsf_file_mmap_prepare(struct vm_area_desc *desc)
{
	int err;

	err = generic_file_mmap_prepare(desc);
	if (!err)
		desc->vm_ops = &vboxsf_file_vm_ops;

	return err;
}
/* * Note that since we are accessing files on the host's filesystem, files * may always be changed underneath us by the host! * * The vboxsf API between the guest and the host does not offer any functions * to deal with this. There is no inode-generation to check for changes, no * events / callback on changes and no way to lock files. * * To avoid returning stale data when a file gets *opened* on our (the guest) * side, we do a "stat" on the host side, then compare the mtime with the * last known mtime and invalidate the page-cache if they differ. * This is done from vboxsf_inode_revalidate(). * * When reads are done through the read_iter fop, it is possible to do * further cache revalidation then, there are 3 options to deal with this: * * 1) Rely solely on the revalidation done at open time * 2) Do another "stat" and compare mtime again. Unfortunately the vboxsf * host API does not allow stat on handles, so we would need to use * file->f_path.dentry and the stat will then fail if the file was unlinked * or renamed (and there is no thing like NFS' silly-rename). So we get: * 2a) "stat" and compare mtime, on stat failure invalidate the cache * 2b) "stat" and compare mtime, on stat failure do nothing * 3) Simply always call invalidate_inode_pages2_range on the range of the read * * Currently we are keeping things KISS and using option 1. this allows * directly using generic_file_read_iter without wrapping it. * * This means that only data written on the host side before open() on * the guest side is guaranteed to be seen by the guest. If necessary * we may provide other read-cache strategies in the future and make this * configurable through a mount option.
*/ conststruct file_operations vboxsf_reg_fops = {
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
.write_iter = generic_file_write_iter,
.mmap_prepare = vboxsf_file_mmap_prepare,
.open = vboxsf_file_open,
.release = vboxsf_file_release,
.fsync = noop_fsync,
.splice_read = filemap_splice_read,
.setlease = simple_nosetlease,
};
/* zero the stale part of the folio if we did a short copy */ if (!folio_test_uptodate(folio) && copied < len)
folio_zero_range(folio, from + copied, len - copied);
if (!folio_test_uptodate(folio) && nwritten == folio_size(folio))
folio_mark_uptodate(folio);
pos += nwritten; if (pos > inode->i_size)
i_size_write(inode, pos);
out:
folio_unlock(folio);
folio_put(folio);
return nwritten;
}
/* * Note simple_write_begin does not read the page from disk on partial writes * this is ok since vboxsf_write_end only writes the written parts of the * page and it does not call folio_mark_uptodate for partial writes.
*/ conststruct address_space_operations vboxsf_reg_aops = {
.read_folio = vboxsf_read_folio,
.writepages = vboxsf_writepages,
.dirty_folio = filemap_dirty_folio,
.write_begin = simple_write_begin,
.write_end = vboxsf_write_end,
.migrate_folio = filemap_migrate_folio,
};
/*
 * NOTE(review): the German text below is website boilerplate (a content
 * disclaimer: "the information on this website was compiled carefully to
 * the best of our knowledge; no guarantee of completeness, correctness or
 * quality is given; the syntax highlighting and measurement are still
 * experimental") that appears to have been pasted in by accident and is
 * not part of this driver. Kept verbatim, commented out so the file
 * remains compilable — consider removing it entirely:
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */