/* NOTE(review): extraction artifact - lines below are whitespace-mangled and the tail of this function appears truncated. */
/* * Prepare a subrequest to upload to the server. We need to allocate credits * so that we know the maximum amount of data that we can include in it.
 */ staticvoid cifs_prepare_write(struct netfs_io_subrequest *subreq)
{ struct cifs_io_subrequest *wdata =
container_of(subreq, struct cifs_io_subrequest, subreq); struct cifs_io_request *req = wdata->req; struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr]; struct TCP_Server_Info *server; struct cifsFileInfo *open_file = req->cfile; struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
size_t wsize = req->rreq.wsize; int rc;
/* Lazily allocate an xid the first time this subrequest is prepared. */
if (!wdata->have_xid) {
wdata->xid = get_xid();
wdata->have_xid = true;
}
/* Pin this subrequest to one channel of the session for its lifetime. */
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
wdata->server = server;
/* A configured wsize of 0 means "negotiate with the server now". */
if (cifs_sb->ctx->wsize == 0)
cifs_negotiate_wsize(server, cifs_sb->ctx,
tlink_tcon(req->cfile->tlink));
/* Stale handle: reopen it; -EAGAIN from the reopen means retry. */
retry: if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, false); if (rc < 0) { if (rc == -EAGAIN) goto retry;
subreq->error = rc; return netfs_prepare_write_failed(subreq);
}
}
/* NOTE(review): only the local declarations of this function survive here; the body appears truncated by extraction. */
/* * Issue a read operation on behalf of the netfs helper functions. We're asked * to make a read of a certain size at a point in the file. We are permitted * to only read a portion of that, but as long as we read something, the netfs * helper will call us again so that we can issue another read.
 */ staticvoid cifs_issue_read(struct netfs_io_subrequest *subreq)
{ struct netfs_io_request *rreq = subreq->rreq; struct cifs_io_subrequest *rdata = container_of(subreq, struct cifs_io_subrequest, subreq); struct cifs_io_request *req = container_of(subreq->rreq, struct cifs_io_request, rreq); struct TCP_Server_Info *server = rdata->server; int rc = 0;
/* * Writeback calls this when it finds a folio that needs uploading. This isn't * called if writeback only has copy-to-cache to deal with.
 */ staticvoid cifs_begin_writeback(struct netfs_io_request *wreq)
{ struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq); int ret;
/* Writeback cannot proceed without a writable handle on the inode. */
ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile); if (ret) {
cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret); return;
}
/* NOTE(review): the remainder of this function appears truncated by extraction. */
/* we do not want atime to be less than mtime, it broke some apps */
atime = inode_set_atime_to_ts(inode, current_time(inode));
mtime = inode_get_mtime(inode);
/*
 * timespec64_compare() is a three-way comparator (<0, 0, >0).  Only
 * bump atime up to mtime when atime is actually OLDER; the previous
 * bare truthiness test also fired when atime > mtime, needlessly
 * rewriting atime on every call where the stamps merely differed.
 */
if (timespec64_compare(&atime, &mtime) < 0)
	inode_set_atime_to_ts(inode, inode_get_mtime(inode));
}
/*
 * Mark as invalid, all open files on tree connections since they
 * were closed when session to server was lost.
 */
void
cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file = NULL;
	struct list_head *tmp;
	struct list_head *tmp1;

	/* only send once per connect */
	spin_lock(&tcon->tc_lock);
	if (tcon->need_reconnect)
		tcon->status = TID_NEED_RECON;

	/*
	 * Bail out (releasing tc_lock) unless a reconnect is actually
	 * pending; otherwise mark that file invalidation is in progress.
	 * Without this release, the spin_lock(&tcon->tc_lock) further
	 * down would self-deadlock, and the TID_IN_FILES_INVALIDATE
	 * check there could never succeed since nothing set it.
	 */
	if (tcon->status != TID_NEED_RECON) {
		spin_unlock(&tcon->tc_lock);
		return;
	}
	tcon->status = TID_IN_FILES_INVALIDATE;
	spin_unlock(&tcon->tc_lock);

	/* list all files open on tree connection and mark them invalid */
	spin_lock(&tcon->open_file_lock);
	list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		open_file->invalidHandle = true;
		open_file->oplock_break_cancelled = true;
	}
	spin_unlock(&tcon->open_file_lock);

	invalidate_all_cached_dirs(tcon);

	/* Only advance to TID_NEED_TCON if nobody raced us out of it. */
	spin_lock(&tcon->tc_lock);
	if (tcon->status == TID_IN_FILES_INVALIDATE)
		tcon->status = TID_NEED_TCON;
	spin_unlock(&tcon->tc_lock);

	/*
	 * BB Add call to evict_inodes(sb) for all superblocks mounted
	 * to this tcon.
	 */
}
staticinlineint cifs_convert_flags(unsignedint flags, int rdwr_for_fscache)
{ if ((flags & O_ACCMODE) == O_RDONLY) return GENERIC_READ; elseif ((flags & O_ACCMODE) == O_WRONLY) return rdwr_for_fscache == 1 ? (GENERIC_READ | GENERIC_WRITE) : GENERIC_WRITE; elseif ((flags & O_ACCMODE) == O_RDWR) { /* GENERIC_ALL is too much permission to request
can cause unnecessary access denied on create */ /* return GENERIC_ALL; */ return (GENERIC_READ | GENERIC_WRITE);
}
/* NOTE(review): fragment - maps POSIX open flags to SMB_O_* bits, then (from L72) the inode-setup tail of a posix open path; the enclosing function headers were lost in extraction. */
if (flags & O_CREAT) {
posix_flags |= SMB_O_CREAT; if (flags & O_EXCL)
posix_flags |= SMB_O_EXCL;
} elseif (flags & O_EXCL)
cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
current->comm, current->tgid);
if (flags & O_TRUNC)
posix_flags |= SMB_O_TRUNC; /* be safe and imply O_SYNC for O_DSYNC */ if (flags & O_DSYNC)
posix_flags |= SMB_O_SYNC; if (flags & O_DIRECTORY)
posix_flags |= SMB_O_DIRECTORY; if (flags & O_NOFOLLOW)
posix_flags |= SMB_O_NOFOLLOW; if (flags & O_DIRECT)
posix_flags |= SMB_O_DIRECT;
/* get new inode and set it up */ if (*pinode == NULL) {
cifs_fill_uniqueid(sb, &fattr);
*pinode = cifs_iget(sb, &fattr); if (!*pinode) {
rc = -ENOMEM; goto posix_open_ret;
}
} else {
/* Existing inode: drop stale cached pages, then refresh attributes. */
cifs_revalidate_mapping(*pinode);
rc = cifs_fattr_to_inode(*pinode, &fattr, false);
}
/* NOTE(review): fragment of cifs_nt_open - the actual server open call between the create_options setup and the inode-info refresh appears truncated by extraction. */
staticint cifs_nt_open(constchar *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, unsignedint f_flags, __u32 *oplock, struct cifs_fid *fid, unsignedint xid, struct cifs_open_info_data *buf)
{ int rc; int desired_access; int disposition; int create_options = CREATE_NOT_DIR; struct TCP_Server_Info *server = tcon->ses->server; struct cifs_open_parms oparms; int rdwr_for_fscache = 0;
if (!server->ops->open) return -ENOSYS;
/* If we're caching, we need to be able to fill in around partial writes. */ if (cifs_fscache_enabled(inode) && (f_flags & O_ACCMODE) == O_WRONLY)
rdwr_for_fscache = 1;
/********************************************************************* * open flag mapping table: * * POSIX Flag CIFS Disposition * ---------- ---------------- * O_CREAT FILE_OPEN_IF * O_CREAT | O_EXCL FILE_CREATE * O_CREAT | O_TRUNC FILE_OVERWRITE_IF * O_TRUNC FILE_OVERWRITE * none of the above FILE_OPEN * * Note that there is not a direct match between disposition * FILE_SUPERSEDE (ie create whether or not file exists although * O_CREAT | O_TRUNC is similar but truncates the existing * file rather than creating a new file as FILE_SUPERSEDE does * (which uses the attributes / metadata passed in on open call) *? *? O_SYNC is a reasonable match to CIFS writethrough flag *? and the read write flags match reasonably. O_LARGEFILE *? is irrelevant because largefile support is always used *? by this client. Flags O_APPEND, O_DIRECT, O_DIRECTORY, * O_FASYNC, O_NOFOLLOW, O_NONBLOCK need further investigation
 *********************************************************************/
disposition = cifs_get_disposition(f_flags);
/* BB pass O_SYNC flag through on file attributes .. BB */
/* O_SYNC also has bit for O_DSYNC so following check picks up either */ if (f_flags & O_SYNC)
create_options |= CREATE_WRITE_THROUGH;
if (f_flags & O_DIRECT)
create_options |= CREATE_NO_BUFFER;
/* TODO: Add support for calling posix query info but with passing in fid */ if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
xid); else
rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
xid, fid);
/* Inode refresh failed: close the just-opened handle; map stale-handle errors. */
if (rc) {
server->ops->close(xid, tcon, fid); if (rc == -ESTALE)
rc = -EOPENSTALE;
}
/* NOTE(review): three unrelated fragments spliced together by extraction: an oplock downgrade, the open-file-list insertion, and a put-offload tail. */
/* * If the server returned a read oplock and we have mandatory brlocks, * set oplock level to None.
 */ if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
oplock = 0;
}
/* if readable file instance put first in list*/
spin_lock(&cinode->open_file_lock); if (file->f_mode & FMODE_READ)
list_add(&cfile->flist, &cinode->openFileList); else
list_add_tail(&cfile->flist, &cinode->openFileList);
spin_unlock(&cinode->open_file_lock);
spin_unlock(&tcon->open_file_lock);
/* Either defer the final put to a workqueue or do it synchronously. */
if (cifs_file->offload)
queue_work(fileinfo_put_wq, &cifs_file->put); else
cifsFileInfo_put_final(cifs_file);
}
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 *
 * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	/* wait_oplock_handler = true, offload = true */
	_cifsFileInfo_put(cifs_file, true, true);
}
/* NOTE(review): fragment of _cifsFileInfo_put - the refcount drop, lock acquisition and the server close call that produces 'rc' (used at the -EBUSY check below) appear truncated by extraction. */
/** * _cifsFileInfo_put - release a reference of file priv data * * This may involve closing the filehandle @cifs_file out on the * server. Must be called without holding tcon->open_file_lock, * cinode->open_file_lock and cifs_file->file_info_lock. * * If @wait_for_oplock_handler is true and we are releasing the last * reference, wait for any running oplock break handler of the file * and cancel any pending one. * * @cifs_file: cifs/smb3 specific info (eg refcounts) for an open file * @wait_oplock_handler: must be false if called from oplock_break_handler * @offload: not offloaded on close and oplock breaks *
 */ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler, bool offload)
{ struct inode *inode = d_inode(cifs_file->dentry); struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); struct TCP_Server_Info *server = tcon->ses->server; struct cifsInodeInfo *cifsi = CIFS_I(inode); struct super_block *sb = inode->i_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct cifs_fid fid = {}; struct cifs_pending_open open; bool oplock_break_cancelled; bool serverclose_offloaded = false;
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
/* store open in pending opens to make sure we don't miss lease break */
cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
/* remove it from the lists */
list_del(&cifs_file->flist);
list_del(&cifs_file->tlist);
atomic_dec(&tcon->num_local_opens);
if (list_empty(&cifsi->openFileList)) {
cifs_dbg(FYI, "closing last open instance for inode %p\n",
d_inode(cifs_file->dentry)); /* * In strict cache mode we need invalidate mapping on the last * close because it may cause a error when we open this file * again and get at least level II oplock.
 */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
cifs_set_oplock_level(cifsi, 0);
}
if (rc == -EBUSY || rc == -EAGAIN) { // Server close failed, hence offloading it as an async op
queue_work(serverclose_wq, &cifs_file->serverclose);
serverclose_offloaded = true;
}
}
if (oplock_break_cancelled)
cifs_done_oplock_break(cifsi);
cifs_del_pending_open(&open);
// if serverclose has been offloaded to wq (on failure), it will // handle offloading put as well. If serverclose not offloaded, // we need to handle offloading put here. if (!serverclose_offloaded) { if (offload)
queue_work(fileinfo_put_wq, &cifs_file->put); else
cifsFileInfo_put_final(cifs_file);
}
}
/* NOTE(review): fragment of cifs_open - the opening brace, local declarations, and several interior sections (xid/tlink setup, the non-posix open path after L199) appear truncated by extraction. */
int cifs_open(struct inode *inode, struct file *file)
/* Get the cached handle as SMB2 close is deferred */ if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
rc = cifs_get_writable_path(tcon, full_path,
FIND_WR_FSUID_ONLY |
FIND_WR_NO_PENDING_DELETE,
&cfile);
} else {
rc = cifs_get_readable_path(tcon, full_path, &cfile);
} if (rc == 0) { unsignedint oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC); unsignedint cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
/* Reuse the deferred-close handle only when access and sync/direct modes match. */
if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
(oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
file->private_data = cfile;
spin_lock(&CIFS_I(inode)->deferred_lock);
cifs_del_deferred_close(cfile);
spin_unlock(&CIFS_I(inode)->deferred_lock); goto use_cache;
}
_cifsFileInfo_put(cfile, true, false);
} else { /* hard link on the defeered close file */
rc = cifs_get_hardlink_path(tcon, inode, file); if (rc)
cifs_close_deferred_file(CIFS_I(inode));
}
if (server->oplocks)
oplock = REQ_OPLOCK; else
oplock = 0;
/* Legacy SMB1 unix-extensions posix open attempt; gated behind insecure-legacy config. */
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (!tcon->broken_posix_open && tcon->unix_ext &&
cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) { /* can not refresh inode info since size could be stale */
rc = cifs_posix_open(full_path, &inode, inode->i_sb,
cifs_sb->ctx->file_mode /* ignored */,
file->f_flags, &oplock, &fid.netfid, xid); if (rc == 0) {
cifs_dbg(FYI, "posix open succeeded\n");
posix_open_ok = true;
} elseif ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { if (tcon->ses->serverNOS)
cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
tcon->ses->ip_addr,
tcon->ses->serverNOS);
tcon->broken_posix_open = true;
} elseif ((rc != -EIO) && (rc != -EREMOTE) &&
(rc != -EOPNOTSUPP)) /* path not found or net err */ goto out; /* * Else fallthrough to retry open the old way on network i/o * or DFS errors.
 */
} #endif/* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
cifs_add_pending_open(&fid, tlink, &open);
if (!posix_open_ok) { if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &fid);
/* NOTE(review): cifs_relock_file head, then (from the rename-sem comment) a fragment of cifs_reopen_file; the seam between them was lost in extraction. */
/* * Try to reacquire byte range locks that were released when session * to server was lost.
 */ staticint
cifs_relock_file(struct cifsFileInfo *cfile)
{ struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); #endif/* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); if (cinode->can_cache_brlcks) { /* can cache locks - no need to relock */
up_read(&cinode->lock_sem); return rc;
}
/* * Can not grab rename sem here because various ops, including those * that already have the rename sem can end up causing writepage to get * called and if the server was down that means we end up here, and we * can never tell if the caller already has the rename_sem.
 */
page = alloc_dentry_path();
full_path = build_path_from_dentry(cfile->dentry, page); if (IS_ERR(full_path)) {
mutex_unlock(&cfile->fh_mutex);
free_dentry_path(page);
free_xid(xid); return PTR_ERR(full_path);
}
/* NOTE(review): fragment of cifs_reopen_file - oparms setup, the retry_open label, and parts of the desired_access computation appear truncated by extraction. */
if (tcon->ses->server->oplocks)
oplock = REQ_OPLOCK; else
oplock = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext && cap_unix(tcon->ses) &&
(CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) { /* * O_CREAT, O_EXCL and O_TRUNC already had their effect on the * original open. Must mask them off for a reopen.
 */ unsignedint oflags = cfile->f_flags &
~(O_CREAT | O_EXCL | O_TRUNC);
rc = cifs_posix_open(full_path, NULL, inode->i_sb,
cifs_sb->ctx->file_mode /* ignored */,
oflags, &oplock, &cfile->fid.netfid, xid); if (rc == 0) {
cifs_dbg(FYI, "posix reopen succeeded\n");
oparms.reconnect = true; goto reopen_success;
} /* * fallthrough to retry open the old way on errors, especially * in the reconnect path it is important to retry hard
 */
} #endif/* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
/* If we're caching, we need to be able to fill in around partial writes. */ if (cifs_fscache_enabled(inode) && (cfile->f_flags & O_ACCMODE) == O_WRONLY)
rdwr_for_fscache = 1;
/* * Can not refresh inode by passing in file_info buf to be returned by * ops->open and then calling get_inode_info with returned buf since * file might have write behind data that needs to be flushed and server * version of file size can be stale. If we knew for sure that inode was * not dirty locally we could do this.
 */
/* First open attempt; -ENOENT on a non-reconnect path means the durable handle expired, so open again from scratch. */
rc = server->ops->open(xid, &oparms, &oplock, NULL); if (rc == -ENOENT && oparms.reconnect == false) { /* durable handle timeout is expired - open the file again */
rc = server->ops->open(xid, &oparms, &oplock, NULL); /* indicate that we need to relock the file */
oparms.reconnect = true;
} if (rc == -EACCES && rdwr_for_fscache == 1) {
/* Server refused the widened (read+write) access: retry with the plain mask. */
desired_access = cifs_convert_flags(cfile->f_flags, 0);
rdwr_for_fscache = 2; goto retry_open;
}
if (rc) {
mutex_unlock(&cfile->fh_mutex);
cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
cifs_dbg(FYI, "oplock: %d\n", oplock); goto reopen_error_exit;
}
if (rdwr_for_fscache == 2)
cifs_invalidate_cache(inode, FSCACHE_INVAL_DIO_WRITE);
if (can_flush) {
rc = filemap_write_and_wait(inode->i_mapping); if (!is_interrupt_error(rc))
mapping_set_error(inode->i_mapping, rc);
if (tcon->posix_extensions) {
rc = smb311_posix_get_inode_info(&inode, full_path,
NULL, inode->i_sb, xid);
} elseif (tcon->unix_ext) {
rc = cifs_get_inode_info_unix(&inode, full_path,
inode->i_sb, xid);
} else {
rc = cifs_get_inode_info(&inode, full_path, NULL,
inode->i_sb, xid, NULL);
}
} /* * Else we are writing out data to server already and could deadlock if * we tried to flush data, and since we do not know if we have data that * would invalidate the current end of file on the server we can not go * to the server to get the new inode info.
 */
/* * If the server returned a read oplock and we have mandatory brlocks, * set oplock level to None.
 */ if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
oplock = 0;
}
server->ops->set_fid(cfile, &cfile->fid, oplock); if (oparms.reconnect)
cifs_relock_file(cfile);
/* NOTE(review): fragment of the persistent-handle reopen routine - the function signature and the tmp_list/open_file declarations appear truncated by extraction. */
if (!tcon->use_persistent || !tcon->need_reopen_files) return;
tcon->need_reopen_files = false;
cifs_dbg(FYI, "Reopen persistent handles\n");
/* list all files open on tree connection, reopen resilient handles */
spin_lock(&tcon->open_file_lock);
list_for_each_entry(open_file, &tcon->openFileList, tlist) { if (!open_file->invalidHandle) continue;
/* Hold a reference while the entry sits on the private tmp_list. */
cifsFileInfo_get(open_file);
list_add_tail(&open_file->rlist, &tmp_list);
}
spin_unlock(&tcon->open_file_lock);
/* Reopen outside the lock; a failure re-arms need_reopen_files for a later pass. */
list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) { if (cifs_reopen_file(open_file, false/* do not flush */))
tcon->need_reopen_files = true;
list_del_init(&open_file->rlist);
cifsFileInfo_put(open_file);
}
}
/* NOTE(review): fragment of the directory-close routine - the function signature and the cfile/rc declarations appear truncated by extraction. */
xid = get_xid();
tcon = tlink_tcon(cfile->tlink);
server = tcon->ses->server;
cifs_dbg(FYI, "Freeing private data in close dir\n");
/* Mark the handle invalid under the lock, then close it on the server if needed. */
spin_lock(&cfile->file_info_lock); if (server->ops->dir_needs_close(cfile)) {
cfile->invalidHandle = true;
spin_unlock(&cfile->file_info_lock); if (server->ops->close_dir)
rc = server->ops->close_dir(xid, tcon, &cfile->fid);
else
rc = -ENOSYS;
cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc); /* not much we can do if it fails anyway, ignore rc */
rc = 0;
} else
spin_unlock(&cfile->file_info_lock);
/* Release any network buffer still held by an in-progress search. */
buf = cfile->srch_inf.ntwrk_buf_start; if (buf) {
cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
cfile->srch_inf.ntwrk_buf_start = NULL; if (cfile->srch_inf.smallBuf)
cifs_small_buf_release(buf); else
cifs_buf_release(buf);
}
cifs_put_tlink(cfile->tlink);
kfree(file->private_data);
file->private_data = NULL; /* BB can we lock the filestruct while this is going on? */
free_xid(xid); return rc;
}
/* NOTE(review): several brlock helper fragments spliced together - the bodies of cifs_lock_test, cifs_lock_add_if, cifs_posix_lock_test and cifs_posix_lock_set are all partially truncated by extraction. */
/* * Check if there is another lock that prevents us to set the lock (mandatory * style). If such a lock exists, update the flock structure with its * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks * or leave it the same if we can't. Returns 0 if we don't need to request to * the server or 1 otherwise.
 */ staticint
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
__u8 type, struct file_lock *flock)
{ int rc = 0; struct cifsLockInfo *conf_lock; struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; bool exist;
/* * Set the byte-range lock (mandatory style). Returns: * 1) 0, if we set the lock and don't need to request to the server; * 2) 1, if no locks prevent us but we need to request to the server; * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */ staticint
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock, bool wait)
{ struct cifsLockInfo *conf_lock; struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); bool exist; int rc = 0;
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * Check if there is another lock that prevents us to set the lock (posix * style). If such a lock exists, update the flock structure with its * properties. Otherwise, set the flock type to F_UNLCK if we can cache brlocks * or leave it the same if we can't. Returns 0 if we don't need to request to * the server or 1 otherwise.
 */ staticint
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{ int rc = 0; struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); unsignedchar saved_type = flock->c.flc_type;
if ((flock->c.flc_flags & FL_POSIX) == 0) return 1;
/* * Set the byte-range lock (posix style). Returns: * 1) <0, if the error occurs while setting the lock; * 2) 0, if we set the lock and don't need to request to the server; * 3) FILE_LOCK_DEFERRED, if we will wait for some other file_lock; * 4) FILE_LOCK_DEFERRED + 1, if we need to request to the server.
 */ staticint
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{ struct cifsInodeInfo *cinode = CIFS_I(file_inode(file)); int rc = FILE_LOCK_DEFERRED + 1;
if ((flock->c.flc_flags & FL_POSIX) == 0) return rc;
cifs_down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem); return rc;
}
/* * Accessing maxBuf is racy with cifs_reconnect - need to store value * and check it before using.
 */
max_buf = tcon->ses->server->maxBuf; if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
free_xid(xid); return -EINVAL;
}
/* * Allocating count locks is enough because no FL_POSIX locks can be * added to the list while we are holding cinode->lock_sem that * protects locking operations of this inode.
 */ for (i = 0; i < count; i++) {
lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); if (!lck) {
rc = -ENOMEM; goto err_out;
}
list_add_tail(&lck->llist, &locks_to_send);
}
/* we are going to update can_cache_brlcks here - need a write access */
cifs_down_write(&cinode->lock_sem); if (!cinode->can_cache_brlcks) {
up_write(&cinode->lock_sem); return rc;
}
/* NOTE(review): fragment of the SMB1 unlock-range routine - the function signature, buf allocation and lock_sem release appear truncated by extraction. */
/* * Accessing maxBuf is racy with cifs_reconnect - need to store value * and check it before using.
 */
max_buf = tcon->ses->server->maxBuf; if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) return -EINVAL;
/* Two passes: one per lock type in types[]. */
cifs_down_write(&cinode->lock_sem); for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) { if (flock->fl_start > li->offset ||
(flock->fl_start + length) <
(li->offset + li->length)) continue; if (current->tgid != li->pid) continue; if (types[i] != li->type) continue; if (cinode->can_cache_brlcks) { /* * We can cache brlock requests - simply remove * a lock from the file's list.
 */
list_del(&li->llist);
cifs_del_lock_waiters(li);
kfree(li); continue;
}
/* Stage this lock into the wire buffer (64-bit length/offset split into halves). */
cur->Pid = cpu_to_le16(li->pid);
cur->LengthLow = cpu_to_le32((u32)li->length);
cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
cur->OffsetLow = cpu_to_le32((u32)li->offset);
cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); /* * We need to save a lock here to let us add it again to * the file's list if the unlock range request fails on * the server.
 */
list_move(&li->llist, &tmp_llist); if (++num == max_num) {
stored_rc = cifs_lockv(xid, tcon,
cfile->fid.netfid,
li->type, num, 0, buf); if (stored_rc) { /* * We failed on the unlock range * request - add all locks from the tmp * list to the head of the file's list.
 */
cifs_move_llist(&tmp_llist,
&cfile->llist->locks);
rc = stored_rc;
} else /* * The unlock range request succeed - * free the tmp list.
 */
cifs_free_llist(&tmp_llist);
cur = buf;
num = 0;
} else
cur++;
} if (num) {
/* Flush any remaining staged locks for this type. */
stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
types[i], num, 0, buf); if (stored_rc) {
cifs_move_llist(&tmp_llist,
&cfile->llist->locks);
rc = stored_rc;
} else
cifs_free_llist(&tmp_llist);
}
}
/* NOTE(review): tail of the setlk path followed by the head of cifs_flock; the seam and the cifs_flock body were lost in extraction. */
lock = cifs_lock_init(flock->fl_start, length, type,
flock->c.flc_flags); if (!lock) return -ENOMEM;
rc = cifs_lock_add_if(cfile, lock, wait_flag); if (rc < 0) {
kfree(lock); return rc;
} if (!rc) goto out;
/* * Windows 7 server can delay breaking lease from read to None * if we set a byte-range lock on a file - break it explicitly * before sending the lock to the server to be sure the next * read won't conflict with non-overlapted locks due to * pagereading.
 */ if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
CIFS_CACHE_READ(CIFS_I(inode))) {
cifs_zap_mapping(inode);
cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
inode);
CIFS_I(inode)->oplock = 0;
}
out: if ((flock->c.flc_flags & FL_POSIX) || (flock->c.flc_flags & FL_FLOCK)) { /* * If this is a request to remove all locks because we * are closing the file, it doesn't matter if the * unlocking failed as both cifs.ko and the SMB server * remove the lock on file close
 */ if (rc) {
cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc); if (!(flock->c.flc_flags & FL_CLOSE)) return rc;
}
rc = locks_lock_file_wait(file, flock);
} return rc;
}
int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
{ int rc, xid; int lock = 0, unlock = 0; bool wait_flag = false; bool posix_lck = false;
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.24 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.