// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) *
*/
/* The xid serves as a useful identifier for each incoming vfs request, in a similar way to the mid which is useful to track each sent smb, and CurrentXid can also provide a running counter (although it will eventually wrap past zero) of the total vfs operations handled
since the cifs fs was mounted */
/* keep high water mark for number of simultaneous ops in filesystem */ if (GlobalTotalActiveXid > GlobalMaxActiveXid)
GlobalMaxActiveXid = GlobalTotalActiveXid; if (GlobalTotalActiveXid > 65000)
cifs_dbg(FYI, "warning: more than 65000 requests active\n");
xid = GlobalCurrentXid++;
spin_unlock(&GlobalMid_Lock); return xid;
}
struct smb_hdr *
cifs_buf_get(void)
{
	struct smb_hdr *buf;
	/*
	 * The SMB2 header is larger than the SMB1 one, so clearing that
	 * many bytes is safe for both protocol variants.
	 */
	size_t hdr_size = sizeof(struct smb2_hdr);

	/*
	 * A fixed-size pool is used instead of the negotiated size:
	 * always allocating the same (slightly oversized) buffer is
	 * more efficient, and maxbuffersize defaults to this value and
	 * cannot be bigger.
	 */
	buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/*
	 * Zero only the first few header bytes here; on most paths the
	 * rest is cleared later in header_assemble.
	 */
	memset(buf, 0, hdr_size + 3);

	atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return buf;
}
void
cifs_buf_release(void *buf_to_free)
{ if (buf_to_free == NULL) { /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/ return;
}
mempool_free(buf_to_free, cifs_req_poolp);
/* We could use negotiated size instead of max_msgsize - but it may be more efficient to always alloc same size albeit slightly larger than necessary and maxbuffersize
defaults to this and can not be bigger */
ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS); /* No need to clear memory here, cleared in header assemble */ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
atomic_inc(&small_buf_alloc_count); #ifdef CONFIG_CIFS_STATS2
atomic_inc(&total_small_buf_alloc_count); #endif/* CONFIG_CIFS_STATS2 */
return ret_buf;
}
void
cifs_small_buf_release(void *buf_to_free)
{
if (buf_to_free == NULL) {
cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n"); return;
}
mempool_free(buf_to_free, cifs_sm_req_poolp);
/* NB: MID can not be set if treeCon not passed in, in that
case it is responsibility of caller to set the mid */ void
/*
 * Fill in an SMB1 request header for @smb_command on behalf of @treeCon.
 *
 * NOTE(review): "conststruct" below is a garbled token (should be
 * "const struct"), and the braces in this body are unbalanced — an
 * enclosing "if (treeCon ...) {" block together with the
 * Protocol/Command/Flags/Pid/Tid field assignments appears to have
 * been dropped from this copy.  Compare against upstream misc.c and
 * restore the missing lines before building.
 */
header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , conststruct cifs_tcon *treeCon, int word_count /* length of fixed section (word count) in two byte units */)
{ char *temp = (char *) buffer;
memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
/*
 * smb_buf_length is the RFC 1001 payload length: the fixed header plus
 * 2*word_count parameter bytes plus the 2-byte bcc field, minus the
 * 4-byte RFC 1001 length field itself.
 */
buffer->smb_buf_length = cpu_to_be32(
(2 * word_count) + sizeof(struct smb_hdr) -
4 /* RFC 1001 length field does not count */ +
2 /* for bcc field itself */) ;
/* Uid is not converted */
buffer->Uid = treeCon->ses->Suid; if (treeCon->ses->server)
buffer->Mid = get_next_mid(treeCon->ses->server);
} if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
buffer->Flags2 |= SMBFLG2_DFS; if (treeCon->nocase)
buffer->Flags |= SMBFLG_CASELESS; if ((treeCon->ses) && (treeCon->ses->server)) if (treeCon->ses->server->sign)
buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
}
/* endian conversion of flags is now done just before sending */
buffer->WordCount = (char) word_count; return;
}
/*
 * Sanity-check a received SMB1 frame header.
 *
 * Returns 0 when the frame is acceptable: a frame with a valid
 * "\xffSMB" signature that is either a response, or one of the few
 * request types a server may legitimately send us.  Returns 1 when
 * the frame must be rejected.
 *
 * Fixed here: the garbled "staticint"/"unsignedint" tokens, which do
 * not compile, and a comment typo ("resposne").
 */
static int
check_smb_hdr(struct smb_hdr *smb)
{
	/* does it have the right SMB "signature" ("\xffSMB" little-endian)? */
	if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
		cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
			 *(unsigned int *)smb->Protocol);
		return 1;
	}

	/* if it's a response then accept */
	if (smb->Flags & SMBFLG_RESPONSE)
		return 0;

	/* only one valid case where server sends us request */
	if (smb->Command == SMB_COM_LOCKING_ANDX)
		return 0;

	/*
	 * Windows NT server returns error response (e.g. STATUS_DELETE_PENDING
	 * or STATUS_OBJECT_NAME_NOT_FOUND or ERRDOS/ERRbadfile or any other)
	 * for some TRANS2 requests without the RESPONSE flag set in header.
	 */
	if (smb->Command == SMB_COM_TRANSACTION2 && smb->Status.CifsError != 0)
		return 0;

	cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
		 get_mid(smb));
	return 1;
}
/* is this frame too small to even get to a BCC? */ if (total_read < 2 + sizeof(struct smb_hdr)) { if ((total_read >= sizeof(struct smb_hdr) - 1)
&& (smb->Status.CifsError != 0)) { /* it's an error return */
smb->WordCount = 0; /* some error cases do not return wct and bcc */ return 0;
} elseif ((total_read == sizeof(struct smb_hdr) + 1) &&
(smb->WordCount == 0)) { char *tmp = (char *)smb; /* Need to work around a bug in two servers here */ /* First, check if the part of bcc they sent was zero */ if (tmp[sizeof(struct smb_hdr)] == 0) { /* some servers return only half of bcc * on simple responses (wct, bcc both zero) * in particular have seen this on * ulogoffX and FindClose. This leaves * one byte of bcc potentially uninitialized
*/ /* zero rest of bcc */
tmp[sizeof(struct smb_hdr)+1] = 0; return 0;
}
cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
} else {
cifs_dbg(VFS, "Length less than smb header size\n");
} return -EIO;
} elseif (total_read < sizeof(*smb) + 2 * smb->WordCount) {
cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
__func__, smb->WordCount); return -EIO;
}
/* otherwise, there is enough to get to the BCC */ if (check_smb_hdr(smb)) return -EIO;
clc_len = smbCalcSize(smb);
if (4 + rfclen != total_read) {
cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
rfclen); return -EIO;
}
if (4 + rfclen != clc_len) {
__u16 mid = get_mid(smb); /* check if bcc wrapped around for large read responses */ if ((rfclen > 64 * 1024) && (rfclen > clc_len)) { /* check if lengths match mod 64K */ if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF)) return 0; /* bcc wrapped */
}
cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
clc_len, 4 + rfclen, mid);
if (4 + rfclen < clc_len) {
cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
rfclen, mid); return -EIO;
} elseif (rfclen > clc_len + 512) { /* * Some servers (Windows XP in particular) send more * data than the lengths in the SMB packet would * indicate on certain calls (byte range locks and * trans2 find first calls in particular). While the * client can handle such a frame by ignoring the * trailing data, we choose limit the amount of extra * data to 512 bytes.
*/
cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
rfclen, mid); return -EIO;
}
} return 0;
}
cifs_dbg(FYI, "Checking for oplock break or dnotify response\n"); if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
(pSMB->hdr.Flags & SMBFLG_RESPONSE)) { struct smb_com_transaction_change_notify_rsp *pSMBr =
(struct smb_com_transaction_change_notify_rsp *)buf; struct file_notify_information *pnotify;
__u32 data_offset = 0;
size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
if (get_bcc(buf) > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
if (data_offset >
len - sizeof(struct file_notify_information)) {
cifs_dbg(FYI, "Invalid data_offset %u\n",
data_offset); returntrue;
}
pnotify = (struct file_notify_information *)
((char *)&pSMBr->hdr.Protocol + data_offset);
cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
pnotify->FileName, pnotify->Action); /* cifs_dump_mem("Rcvd notify Data: ",buf,
sizeof(struct smb_hdr)+60); */ returntrue;
} if (pSMBr->hdr.Status.CifsError) {
cifs_dbg(FYI, "notify err 0x%x\n",
pSMBr->hdr.Status.CifsError); returntrue;
} returnfalse;
} if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX) returnfalse; if (pSMB->hdr.Flags & SMBFLG_RESPONSE) { /* no sense logging error on invalid handle on oplock break - harmless race between close request and oplock break response is expected from time to time writing out
large dirty files cached on the client */ if ((NT_STATUS_INVALID_HANDLE) ==
le32_to_cpu(pSMB->hdr.Status.CifsError)) {
cifs_dbg(FYI, "Invalid handle on oplock break\n"); returntrue;
} elseif (ERRbadfid ==
le16_to_cpu(pSMB->hdr.Status.DosError.Error)) { returntrue;
} else { returnfalse; /* on valid oplock brk we get "request" */
}
} if (pSMB->hdr.WordCount != 8) returnfalse;
cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
pSMB->LockType, pSMB->OplockLevel); if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE)) returnfalse;
/* If server is a channel, select the primary channel */
pserver = SERVER_IS_CHAN(srv) ? srv->primary_server : srv;
/* look up tcon based on tid & uid */
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { if (cifs_ses_exiting(ses)) continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->tid != buf->Tid) continue;
if (cifs_sb->master_tlink)
tcon = cifs_sb_master_tcon(cifs_sb);
cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
cifs_sb->mnt_cifs_serverino_autodisabled = true;
cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
tcon ? tcon->tree_name : "new server");
cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
/* * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 *
 * Registers the caller as a writer on @cinode (bumping cinode->writers
 * under writers_lock).  Returns 0 on success, or a negative value if the
 * killable wait is interrupted.  Each successful call must be balanced
 * by a matching "put" that decrements cinode->writers.
 */ int cifs_get_writer(struct cifsInodeInfo *cinode)
{ int rc;
start:
/* Block (killably) until any pending oplock break has been serviced. */
rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
TASK_KILLABLE); if (rc) return rc;
/* First writer raises PENDING_WRITERS so oplock handling can wait on us. */
spin_lock(&cinode->writers_lock); if (!cinode->writers)
set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
cinode->writers++; /* Check to see if we have started servicing an oplock break */ if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
/* Lost the race with an oplock break: undo our registration,
   wake anyone waiting for writers to drain, and wait again. */
cinode->writers--; if (cinode->writers == 0) {
clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
}
spin_unlock(&cinode->writers_lock); goto start;
}
spin_unlock(&cinode->writers_lock); return 0;
}
/** * cifs_queue_oplock_break - queue the oplock break handler for cfile * @cfile: The file to break the oplock on * * This function is called from the demultiplex thread when it * receives an oplock break for @cfile. * * Assumes the tcon->open_file_lock is held. * Assumes cfile->file_info_lock is NOT held.
*/ void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{ /* * Bump the handle refcount now while we hold the * open_file_lock to enforce the validity of it for the oplock * break handler. The matching put is done at the end of the * handler.
*/
cifsFileInfo_get(cfile);
/* * Critical section which runs after acquiring deferred_lock. * As there is no reference count on cifs_deferred_close, pdclose * should not be used outside deferred_lock.
*/ bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{ struct cifs_deferred_close *dclose;
/* * If a dentry has been deleted, all corresponding open handles should know that
 * so that we do not defer close them.
 */ void cifs_mark_open_handles_for_deleted_file(struct inode *inode, constchar *path)
{ struct cifsFileInfo *cfile; void *page; constchar *full_path; struct cifsInodeInfo *cinode = CIFS_I(inode);
/*
 * NOTE(review): "constchar" above is a garbled token (should be
 * "const char").  Also, `page` is used below without a visible
 * assignment and open_file_lock is unlocked at the end without a
 * visible spin_lock() — upstream has
 * "page = alloc_dentry_path(); spin_lock(&cinode->open_file_lock);"
 * at this point.  Those lines appear to have been dropped from this
 * copy; restore them before building.
 */
/* * note: we need to construct path from dentry and compare only if the * inode has any hardlinks. When number of hardlinks is 1, we can just * mark all open handles since they are going to be from the same file.
 */ if (inode->i_nlink > 1) {
list_for_each_entry(cfile, &cinode->openFileList, flist) {
/* Only handles whose reconstructed path matches @path are marked. */
full_path = build_path_from_dentry(cfile->dentry, page); if (!IS_ERR(full_path) && strcmp(full_path, path) == 0)
cfile->status_file_deleted = true;
}
} else {
list_for_each_entry(cfile, &cinode->openFileList, flist)
cfile->status_file_deleted = true;
}
spin_unlock(&cinode->open_file_lock);
free_dentry_path(page);
}
/* parses DFS referral V3 structure * caller is responsible for freeing target_nodes * returns: * - on success - 0 * - on failure - errno
*/ int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size, unsignedint *num_of_nodes, struct dfs_info3_param **target_nodes, conststruct nls_table *nls_codepage, int remap, constchar *searchName, bool is_unicode)
{ int i, rc = 0; char *data_end; struct dfs_referral_level_3 *ref;
if (rsp_size < sizeof(*rsp)) {
cifs_dbg(VFS | ONCE, "%s: header is malformed (size is %u, must be %zu)\n",
__func__, rsp_size, sizeof(*rsp));
rc = -EINVAL; goto parse_DFS_referrals_exit;
}
if (*num_of_nodes < 1) {
cifs_dbg(VFS | ONCE, "%s: [path=%s] num_referrals must be at least > 0, but we got %d\n",
__func__, searchName, *num_of_nodes);
rc = -ENOENT; goto parse_DFS_referrals_exit;
}
if (sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3) > rsp_size) {
cifs_dbg(VFS | ONCE, "%s: malformed buffer (size is %u, must be at least %zu)\n",
__func__, rsp_size, sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3));
rc = -EINVAL; goto parse_DFS_referrals_exit;
}
ref = (struct dfs_referral_level_3 *) &(rsp->referrals); if (ref->VersionNumber != cpu_to_le16(3)) {
cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
le16_to_cpu(ref->VersionNumber));
rc = -EINVAL; goto parse_DFS_referrals_exit;
}
/* get the upper boundary of the resp buffer */
data_end = (char *)rsp + rsp_size;
/* collect necessary data from referrals */ for (i = 0; i < *num_of_nodes; i++) { char *temp; int max_len; struct dfs_info3_param *node = (*target_nodes)+i;
/** * cifs_alloc_hash - allocate hash and hash context together * @name: The name of the crypto hash algo * @sdesc: SHASH descriptor where to put the pointer to the hash TFM * * The caller has to make sure @sdesc is initialized to either NULL or * a valid context. It can be freed via cifs_free_hash().
 */ int
cifs_alloc_hash(constchar *name, struct shash_desc **sdesc)
{ int rc = 0; struct crypto_shash *alg = NULL;
/*
 * NOTE(review): "constchar" above is a garbled token (should be
 * "const char").  `alg` is still NULL when passed to
 * crypto_shash_descsize() below, and `rc` is never used — upstream
 * allocates the TFM first with "alg = crypto_alloc_shash(name, 0, 0);"
 * plus an IS_ERR() check.  Those lines appear to have been dropped
 * from this copy; restore them before building.
 */
*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL); if (*sdesc == NULL) {
cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
crypto_free_shash(alg); return -ENOMEM;
}
(*sdesc)->tfm = alg; return 0;
}
/** * cifs_free_hash - free hash and hash context together * @sdesc: Where to find the pointer to the hash TFM * * Freeing a NULL descriptor is safe.
*/ void
cifs_free_hash(struct shash_desc **sdesc)
{ if (unlikely(!sdesc) || !*sdesc) return;
if ((*sdesc)->tfm) {
crypto_free_shash((*sdesc)->tfm);
(*sdesc)->tfm = NULL;
}
/** * copy_path_name - copy src path to dst, possibly truncating * @dst: The destination buffer * @src: The source name * * returns number of bytes written (including trailing nul)
*/ int copy_path_name(char *dst, constchar *src)
{ int name_len;
/* * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it * will truncate and strlen(dst) will be PATH_MAX-1
*/
name_len = strscpy(dst, src, PATH_MAX); if (WARN_ON_ONCE(name_len < 0))
name_len = PATH_MAX-1;
/* we count the trailing nul */
name_len++; return name_len;
}
for (; *fs_type; fs_type++) {
iterate_supers_type(*fs_type, f, &sd); if (sd.sb) { /* * Grab an active reference in order to prevent automounts (DFS links) * of expiring and then freeing up our cifs superblock pointer while * we're doing failover.
*/
cifs_sb_active(sd.sb); return sd.sb;
}
}
pr_warn_once("%s: could not find dfs superblock\n", __func__); return ERR_PTR(-EINVAL);
}
/*
 * Drop the active reference on a superblock obtained via the DFS
 * superblock lookup (which takes it with cifs_sb_active()).  Safe to
 * call with NULL or an ERR_PTR, in which case it does nothing.
 *
 * Fixed here: the garbled "staticvoid" token, which does not compile.
 */
static void __cifs_put_super(struct super_block *sb)
{
	if (!IS_ERR_OR_NULL(sb))
		cifs_sb_deactive(sb);
}
/* * Handle weird Windows SMB server behaviour. It responds with * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains * non-ASCII unicode symbols.
 *
 * On return, *islink is true when @full_path refers to a DFS link.
 * Returns 0 on success (including the fast-path skips) or a negative
 * errno.
 */ int cifs_inval_name_dfs_link_error(constunsignedint xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, constchar *full_path, bool *islink)
{ struct TCP_Server_Info *server = tcon->ses->server; struct cifs_ses *ses = tcon->ses;
size_t len; char *path; char *ref_path;
/*
 * NOTE(review): "constunsignedint"/"constchar" above are garbled
 * tokens.  Further down, `ref_path` is tested with IS_ERR() without
 * any visible assignment, and `path` is allocated but never filled or
 * freed — upstream builds the full tree path into `path` and derives
 * `ref_path` from it (then kfree()s `path`).  Those lines appear to
 * have been dropped from this copy; restore them before building.
 */
*islink = false;
/* * Fast path - skip check when @full_path doesn't have a prefix path to * look up or tcon is not DFS.
 */ if (strlen(full_path) < 2 || !cifs_sb ||
(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
!is_tcon_dfs(tcon)) return 0;
/* Nothing to resolve against if the server has no DFS full path. */
spin_lock(&server->srv_lock); if (!server->leaf_fullpath) {
spin_unlock(&server->srv_lock); return 0;
}
spin_unlock(&server->srv_lock);
/* * Slow path - tcon is DFS and @full_path has prefix path, so attempt * to get a referral to figure out whether it is an DFS link.
 */
len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
path = kmalloc(len, GFP_KERNEL); if (!path) return -ENOMEM;
/* -EINVAL from the referral path means "not a link": report success. */
if (IS_ERR(ref_path)) { if (PTR_ERR(ref_path) != -EINVAL) return PTR_ERR(ref_path);
} else { struct dfs_info3_param *refs = NULL; int num_refs = 0;
/* * XXX: we are not using dfs_cache_find() here because we might * end up filling all the DFS cache and thus potentially * removing cached DFS targets that the client would eventually * need during failover.
 */
ses = CIFS_DFS_ROOT_SES(ses); if (ses->server->ops->get_dfs_refer &&
!ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
&num_refs, cifs_sb->local_nls,
cifs_remap(cifs_sb)))
*islink = refs[0].server_type == DFS_TYPE_LINK;
free_dfs_info_array(refs, num_refs);
kfree(ref_path);
} return 0;
} #endif
/*
 * Wait for the demultiplex thread to reconnect @server.
 *
 * Each wait round lasts up to 10 seconds, which is longer than the
 * 7-second cifs socket timeout.  With @retry false we wait a single
 * round ("soft" mount behaviour); with @retry true we keep waiting
 * until the server comes back or the process is killed ("hard" mount
 * behaviour).
 *
 * Returns 0 once the server has left CifsNeedReconnect, -ERESTARTSYS
 * if a signal interrupts the wait, or -EHOSTDOWN when we give up.
 */
int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
{
	int wait_secs = 10;
	int ret;

	for (;;) {
		ret = wait_event_interruptible_timeout(server->response_q,
						       (server->tcpStatus != CifsNeedReconnect),
						       wait_secs * HZ);
		if (ret < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* did the reconnect complete while we slept? */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			return 0;
		}
		spin_unlock(&server->srv_lock);

		/* soft behaviour: only one round of waiting */
		if (!retry)
			break;
	}

	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
	return -EHOSTDOWN;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.