spin_lock(&server->req_lock);
val = server->ops->get_credits_field(server, optype);
/* eg found case where write overlapping reconnect messed up credits */ if (((optype & CIFS_OP_MASK) == CIFS_NEG_OP) && (*val != 0))
reconnect_with_invalid_credits = true;
switch (rc) { case -1: /* change_conf hasn't been executed */ break; case 0:
cifs_server_dbg(VFS, "Possible client or server bug - zero credits\n"); break; case 1:
cifs_server_dbg(VFS, "disabling echoes and oplocks\n"); break; case 2:
cifs_dbg(FYI, "disabling oplocks\n"); break; default: /* change_conf rebalanced credits for different types */ break;
}
/*
 * Set the server's credit count to an absolute value, under req_lock.
 *
 * val == 1 is used as a reconnect marker: bump reconnect_instance and
 * the channel sequence number (on the primary channel when this server
 * is a secondary channel) per MS-SMB2 3.2.4.1 and 3.2.7.1.
 *
 * NOTE(review): scredits/in_flight are snapshotted under the lock,
 * presumably for the tracepoint/debug output that follows — the tail of
 * this function is not visible in this chunk; confirm against the full file.
 */
staticvoid
smb2_set_credits(struct TCP_Server_Info *server, constint val)
{ int scredits, in_flight;
spin_lock(&server->req_lock);
server->credits = val; if (val == 1) {
server->reconnect_instance++; /* * ChannelSequence updated for all channels in primary channel so that consistent * across SMB3 requests sent on any channel. See MS-SMB2 3.2.4.1 and 3.2.7.1
*/ if (SERVER_IS_CHAN(server))
server->primary_server->channel_sequence_num++; else
server->channel_sequence_num++;
}
scredits = server->credits;
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
/*
 * Hand out the next SMB2 message id (mid) for this connection.
 *
 * For SMB2 the mid is the current counter value; the counter is
 * advanced under mid_counter_lock so concurrent callers each receive
 * a unique id.
 */
static __u64
smb2_get_next_mid(struct TCP_Server_Info *server)
{
	__u64 cur;

	spin_lock(&server->mid_counter_lock);
	cur = server->current_mid;
	server->current_mid = cur + 1;
	spin_unlock(&server->mid_counter_lock);

	return cur;
}
/* start with specified rsize, or default */
rsize = ctx->got_rsize ? ctx->vol_rsize : SMB3_DEFAULT_IOSIZE;
rsize = min_t(unsignedint, rsize, server->max_read); #ifdef CONFIG_CIFS_SMB_DIRECT if (server->rdma) { struct smbdirect_socket_parameters *sp =
&server->smbd_conn->socket.parameters;
if (server->sign) /* * Account for SMB2 data transfer packet header and * possible encryption header
*/
rsize = min_t(unsignedint,
rsize,
sp->max_fragmented_recv_size -
SMB2_READWRITE_PDU_HEADER_SIZE - sizeof(struct smb2_transform_hdr)); else
rsize = min_t(unsignedint,
rsize, sp->max_read_write_size);
} #endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
rsize = min_t(unsignedint, rsize, SMB2_MAX_BUFFER_SIZE);
return prevent_zero_iosize(rsize, "r");
}
/* * compare two interfaces a and b * return 0 if everything matches. * return 1 if a is rdma capable, or rss capable, or has higher link speed * return -1 otherwise.
*/ staticint
iface_cmp(struct cifs_server_iface *a, struct cifs_server_iface *b)
{ int cmp_ret = 0;
spin_lock(&ses->iface_lock); /* do not query too frequently, this time with lock held */ if (ses->iface_last_update &&
time_before(jiffies, ses->iface_last_update +
(SMB_INTERFACE_POLL_INTERVAL * HZ))) {
spin_unlock(&ses->iface_lock); return 0;
}
/* * Go through iface_list and mark them as inactive
*/
list_for_each_entry_safe(iface, niface, &ses->iface_list,
iface_head)
iface->is_active = 0;
spin_unlock(&ses->iface_lock);
/* * Samba server e.g. can return an empty interface list in some cases, * which would only be a problem if we were requesting multichannel
*/ if (bytes_left == 0) { /* avoid spamming logs every 10 minutes, so log only in mount */ if ((ses->chan_max > 1) && in_mount)
cifs_dbg(VFS, "multichannel not available\n" "Empty network interface list returned by server %s\n",
ses->server->hostname);
rc = -EOPNOTSUPP;
ses->iface_last_update = jiffies; goto out;
}
while (bytes_left >= (ssize_t)sizeof(*p)) {
memset(&tmp_iface, 0, sizeof(tmp_iface)); /* default to 1Gbps when link speed is unset */
tmp_iface.speed = le64_to_cpu(p->LinkSpeed) ?: 1000000000;
tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0;
tmp_iface.rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE) ? 1 : 0;
switch (p->Family) { /* * The kernel and wire socket structures have the same * layout and use network byte order but make the * conversion explicit in case either one changes.
*/ case INTERNETWORK:
addr4 = (struct sockaddr_in *)&tmp_iface.sockaddr;
p4 = (struct iface_info_ipv4 *)p->Buffer;
addr4->sin_family = AF_INET;
memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
addr4->sin_port = cpu_to_be16(CIFS_PORT);
/* * The iface_list is assumed to be sorted by speed. * Check if the new interface exists in that list. * NEVER change iface. it could be in use. * Add a new one instead
*/
spin_lock(&ses->iface_lock);
list_for_each_entry_safe(iface, niface, &ses->iface_list,
iface_head) {
ret = iface_cmp(iface, &tmp_iface); if (!ret) {
iface->is_active = 1;
spin_unlock(&ses->iface_lock); goto next_iface;
} elseif (ret < 0) { /* all remaining ifaces are slower */
kref_get(&iface->refcount); break;
}
}
spin_unlock(&ses->iface_lock);
/* no match. insert the entry in the list */
info = kmalloc(sizeof(struct cifs_server_iface),
GFP_KERNEL); if (!info) {
rc = -ENOMEM; goto out;
}
memcpy(info, &tmp_iface, sizeof(tmp_iface));
/* add this new entry to the list */
kref_init(&info->refcount);
info->is_active = 1;
/* Azure rounds the buffer size up 8, to a 16 byte boundary */ if ((bytes_left > 8) ||
(bytes_left >= offsetof(struct network_interface_info_ioctl_rsp, Next)
+ sizeof(p->Next) && p->Next))
cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
ses->iface_last_update = jiffies;
out: /* * Go through the list again and put the inactive entries
*/
spin_lock(&ses->iface_lock);
list_for_each_entry_safe(iface, niface, &ses->iface_list,
iface_head) { if (!iface->is_active) {
list_del(&iface->iface_head);
kref_put(&iface->refcount, release_iface);
ses->iface_count--;
}
}
spin_unlock(&ses->iface_lock);
/* do not query too frequently */ if (ses->iface_last_update &&
time_before(jiffies, ses->iface_last_update +
(SMB_INTERFACE_POLL_INTERVAL * HZ))) return 0;
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_QUERY_NETWORK_INTERFACE_INFO,
NULL /* no data input */, 0 /* no data input */,
CIFSMaxBufSize, (char **)&out_buf, &ret_data_len); if (rc == -EOPNOTSUPP) {
cifs_dbg(FYI, "server does not support query network interfaces\n");
ret_data_len = 0;
} elseif (rc != 0) {
cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc); goto out;
}
rc = parse_server_interfaces(out_buf, ret_data_len, ses, in_mount); if (rc) goto out;
/* check if iface is still active */
spin_lock(&ses->chan_lock);
pserver = ses->chans[0].server; if (pserver && !cifs_chan_is_iface_active(ses, pserver)) {
spin_unlock(&ses->chan_lock);
cifs_chan_update_iface(ses, pserver);
spin_lock(&ses->chan_lock);
}
spin_unlock(&ses->chan_lock);
rc = smb2_query_info_compound(xid, tcon, path,
FILE_READ_EA,
FILE_FULL_EA_INFORMATION,
SMB2_O_INFO_FILE,
CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE,
&rsp_iov, &buftype, cifs_sb); if (rc) { /* * If ea_name is NULL (listxattr) and there are no EAs, * return 0 as it's not an error. Otherwise, the specified * ea_name was not found.
*/ if (!ea_name && rc == -ENODATA)
rc = 0; goto qeas_exit;
}
replay_again: /* reinitialize for possible replay */
flags = CIFS_CP_CREATE_CLOSE_OP;
oplock = SMB2_OPLOCK_LEVEL_NONE;
server = cifs_pick_channel(ses);
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
if (ea_name_len > 255) return -EINVAL;
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); if (!utf16_path) return -ENOMEM;
ea = NULL;
resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
vars = kzalloc(sizeof(*vars), GFP_KERNEL); if (!vars) {
rc = -ENOMEM; goto out_free_path;
}
rqst = vars->rqst;
rsp_iov = vars->rsp_iov;
if (ses->server->ops->query_all_EAs) { if (!ea_value) {
rc = ses->server->ops->query_all_EAs(xid, tcon, path,
ea_name, NULL, 0,
cifs_sb); if (rc == -ENODATA) goto sea_exit;
} else { /* If we are adding a attribute we should first check * if there will be enough space available to store * the new EA. If not we should not add it since we * would not be able to even read the EAs back.
*/
rc = smb2_query_info_compound(xid, tcon, path,
FILE_READ_EA,
FILE_FULL_EA_INFORMATION,
SMB2_O_INFO_FILE,
CIFSMaxBufSize -
MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE,
&rsp_iov[1], &resp_buftype[1], cifs_sb); if (rc == 0) {
rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
used_len = le32_to_cpu(rsp->OutputBufferLength);
}
free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
resp_buftype[1] = CIFS_NO_BUFFER;
memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
rc = 0;
/* Use a fudge factor of 256 bytes in case we collide * with a different set_EAs command.
*/ if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
used_len + ea_name_len + ea_value_len + 1) {
rc = -ENOSPC; goto sea_exit;
}
}
}
/* Open */
rqst[0].rq_iov = vars->open_iov;
rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
/*
 * Reset the per-tcon SMB2 command statistics: zero both the "sent" and
 * "failed" counters for every SMB2 command code.
 */
static void
smb2_clear_stats(struct cifs_tcon *tcon)
{
	int cmd;

	for (cmd = 0; cmd < NUMBER_OF_SMB2_COMMANDS; cmd++) {
		atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[cmd], 0);
		atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[cmd], 0);
	}
}
staticvoid
smb2_dump_share_caps(struct seq_file *m, struct cifs_tcon *tcon)
{
seq_puts(m, "\n\tShare Capabilities:"); if (tcon->capabilities & SMB2_SHARE_CAP_DFS)
seq_puts(m, " DFS,"); if (tcon->capabilities & SMB2_SHARE_CAP_CONTINUOUS_AVAILABILITY)
seq_puts(m, " CONTINUOUS AVAILABILITY,"); if (tcon->capabilities & SMB2_SHARE_CAP_SCALEOUT)
seq_puts(m, " SCALEOUT,"); if (tcon->capabilities & SMB2_SHARE_CAP_CLUSTER)
seq_puts(m, " CLUSTER,"); if (tcon->capabilities & SMB2_SHARE_CAP_ASYMMETRIC)
seq_puts(m, " ASYMMETRIC,"); if (tcon->capabilities == 0)
seq_puts(m, " None"); if (tcon->ss_flags & SSINFO_FLAGS_ALIGNED_DEVICE)
seq_puts(m, " Aligned,"); if (tcon->ss_flags & SSINFO_FLAGS_PARTITION_ALIGNED_ON_DEVICE)
seq_puts(m, " Partition Aligned,"); if (tcon->ss_flags & SSINFO_FLAGS_NO_SEEK_PENALTY)
seq_puts(m, " SSD,"); if (tcon->ss_flags & SSINFO_FLAGS_TRIM_ENABLED)
seq_puts(m, " TRIM-support,");
/* Creation time should not need to be updated on close */ if (file_inf.LastWriteTime)
inode_set_mtime_to_ts(inode,
cifs_NTtimeToUnix(file_inf.LastWriteTime)); if (file_inf.ChangeTime)
inode_set_ctime_to_ts(inode,
cifs_NTtimeToUnix(file_inf.ChangeTime)); if (file_inf.LastAccessTime)
inode_set_atime_to_ts(inode,
cifs_NTtimeToUnix(file_inf.LastAccessTime));
/* * i_blocks is not related to (i_size / i_blksize), * but instead 512 byte (2**9) size is required for * calculating num blocks.
*/ if (le64_to_cpu(file_inf.AllocationSize) > 4096)
inode->i_blocks =
(512 - 1 + le64_to_cpu(file_inf.AllocationSize)) >> 9;
/* End of file and Attributes should not have to be updated on close */
spin_unlock(&inode->i_lock); return rc;
}
/* Query */ if (qi.flags & PASSTHRU_FSCTL) { /* Can eventually relax perm check since server enforces too */ if (!capable(CAP_SYS_ADMIN)) {
rc = -EPERM; goto free_open_req;
}
rqst[1].rq_iov = &vars->io_iov[0];
rqst[1].rq_nvec = SMB2_IOCTL_IOV_SIZE;
rc = SMB2_ioctl_init(tcon, server, &rqst[1], COMPOUND_FID, COMPOUND_FID,
qi.info_type, buffer, qi.output_buffer_length,
CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
MAX_SMB2_CLOSE_RESPONSE_SIZE);
free_req1_func = SMB2_ioctl_free;
} elseif (qi.flags == PASSTHRU_SET_INFO) { /* Can eventually relax perm check since server enforces too */ if (!capable(CAP_SYS_ADMIN)) {
rc = -EPERM; goto free_open_req;
} if (qi.output_buffer_length < 8) {
rc = -EINVAL; goto free_open_req;
}
rqst[1].rq_iov = vars->si_iov;
rqst[1].rq_nvec = 1;
cifs_dbg(FYI, "%s: about to call request res key\n", __func__); /* Request a key from the server to identify the source of the copy */
rc = SMB2_request_res_key(xid, tlink_tcon(srcfile->tlink),
srcfile->fid.persistent_fid,
srcfile->fid.volatile_fid, pcchunk);
/* Note: request_res_key sets res_key null only if rc !=0 */ if (rc) goto cchunk_out;
/* For now array only one chunk long, will make more flexible later */
pcchunk->ChunkCount = cpu_to_le32(1);
pcchunk->Reserved = 0;
pcchunk->Reserved2 = 0;
/* * Check if this is the first request using these sizes, * (ie check if copy succeed once with original sizes * and check if the server gave us different sizes after * we already updated max sizes on previous request). * if not then why is the server returning an error now
*/ if ((chunks_copied != 0) || chunk_sizes_updated) goto cchunk_out;
/* Check that server is not asking us to grow size */ if (le32_to_cpu(retbuf->ChunkBytesWritten) <
tcon->max_bytes_chunk)
tcon->max_bytes_chunk =
le32_to_cpu(retbuf->ChunkBytesWritten); else goto cchunk_out; /* server gave us bogus size */
/* No need to change MaxChunks since already set to 1 */
chunk_sizes_updated = true;
} else goto cchunk_out;
}
/* Set or clear the SPARSE_FILE attribute based on value passed in setsparse */ staticbool smb2_set_sparse(constunsignedint xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, struct inode *inode, __u8 setsparse)
{ struct cifsInodeInfo *cifsi; int rc;
cifsi = CIFS_I(inode);
/* if file already sparse don't bother setting sparse again */ if ((cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && setsparse) returntrue; /* already sparse */
if (!(cifsi->cifsAttrs & FILE_ATTRIBUTE_SPARSE_FILE) && !setsparse) returntrue; /* already not sparse */
/* * Can't check for sparse support on share the usual way via the * FS attribute info (FILE_SUPPORTS_SPARSE_FILES) on the share * since Samba server doesn't set the flag on the share, yet * supports the set sparse FSCTL and returns sparse correctly * in the file attributes. If we fail setting sparse though we * mark that server does not support sparse files for this share * to avoid repeatedly sending the unsupported fsctl to server * if the file is repeatedly extended.
*/ if (tcon->broken_sparse_sup) returnfalse;
/* * If extending file more than one page make sparse. Many Linux fs * make files sparse by default when extending via ftruncate
*/
inode = d_inode(cfile->dentry);
/* server fileays advertise duplicate extent support with this flag */ if ((le32_to_cpu(tcon->fsAttrInfo.Attributes) &
FILE_SUPPORTS_BLOCK_REFCOUNTING) == 0) return -EOPNOTSUPP;
/* * Although also could set plausible allocation size (i_blocks) * here in addition to setting the file size, in reflink * it is likely that the target file is sparse. Its allocation * size will be queried on next revalidate, but it is important * to make sure that file's cached size is updated immediately
*/
netfs_resize_file(netfs_inode(inode), dest_off + len, true);
cifs_setsize(inode, dest_off + len);
}
rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
trgtfile->fid.volatile_fid,
FSCTL_DUPLICATE_EXTENTS_TO_FILE,
(char *)&dup_ext_buf, sizeof(struct duplicate_extents_to_file),
CIFSMaxBufSize, NULL,
&ret_data_len);
if (ret_data_len > 0)
cifs_dbg(FYI, "Non-zero response length in duplicate extents\n");
/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ #define GMT_TOKEN_SIZE 50
#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
/* * Input buffer contains (empty) struct smb_snapshot array with size filled in * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
*/ staticint
smb3_enum_snapshots(constunsignedint xid, struct cifs_tcon *tcon, struct cifsFileInfo *cfile, void __user *ioc_buf)
{ char *retbuf = NULL; unsignedint ret_data_len = 0; int rc;
u32 max_response_size; struct smb_snapshot_array snapshot_in;
/* * On the first query to enumerate the list of snapshots available * for this volume the buffer begins with 0 (number of snapshots * which can be returned is zero since at that point we do not know * how big the buffer needs to be). On the second query, * it (ret_data_len) is set to number of snapshots so we can * know to set the maximum response size larger (see below).
*/ if (get_user(ret_data_len, (unsignedint __user *)ioc_buf)) return -EFAULT;
/* * Note that for snapshot queries that servers like Azure expect that * the first query be minimal size (and just used to get the number/size * of previous versions) so response size must be specified as EXACTLY * sizeof(struct snapshot_array) which is 16 when rounded up to multiple * of eight bytes.
*/ if (ret_data_len == 0)
max_response_size = MIN_SNAPSHOT_ARRAY_SIZE; else
max_response_size = CIFSMaxBufSize;
rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
cfile->fid.volatile_fid,
FSCTL_SRV_ENUMERATE_SNAPSHOTS,
NULL, 0 /* no input data */, max_response_size,
(char **)&retbuf,
&ret_data_len);
cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
rc, ret_data_len); if (rc) return rc;
/* * Check for min size, ie not large enough to fit even one GMT * token (snapshot). On the first ioctl some users may pass in * smaller size (or zero) to simply get the size of the array * so the user space caller can allocate sufficient memory * and retry the ioctl again with larger array size sufficient * to hold all of the snapshot GMT tokens on the second try.
*/ if (snapshot_in.snapshot_array_size < GMT_TOKEN_SIZE)
ret_data_len = sizeof(struct smb_snapshot_array);
/* * We return struct SRV_SNAPSHOT_ARRAY, followed by * the snapshot array (of 50 byte GMT tokens) each * representing an available previous version of the data
*/ if (ret_data_len > (snapshot_in.snapshot_array_size + sizeof(struct smb_snapshot_array)))
ret_data_len = snapshot_in.snapshot_array_size + sizeof(struct smb_snapshot_array);
if (copy_to_user(ioc_buf, retbuf, ret_data_len))
rc = -EFAULT;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.