// SPDX-License-Identifier: LGPL-2.1 /* * * Copyright (C) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) * * Common Internet FileSystem (CIFS) client *
*/
/* Note that BB means BUGBUG (ie something to fix eventually) */
/*
 * DOS dates from 1980/1/1 through 2107/12/31
 * Protocol specifications indicate the range should be to 119, which
 * limits maximum year to 2099. But this range has not been checked.
 */
#define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
#define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
#define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
/*
 * Global transaction id (XID) information
 */
unsigned int GlobalCurrentXid;		/* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid;	/* prot by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid;	/* prot by GlobalMid_Lock */

DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */
/*
 * Module parameter (0644: root-writable via sysfs). Per the description
 * string below, it restricts overriding the default dialects with the
 * older SMB1/SMB2.0 dialects on mount.
 */
module_param(disable_legacy_dialects, bool, 0644);
MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be " "helpful to restrict the ability to " "override the default dialects (SMB2.1, " "SMB3 and SMB3.02) on mount with old " "dialects (CIFS/SMB1 and SMB2) since " "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker" " and less secure. Default: n/N/0");
/*
 * Bumps refcount for cifs super block.
 * Note that it should be only called if a reference to VFS super block is
 * already held, e.g. in open-type syscalls context. Otherwise it can race with
 * atomic_dec_and_test in deactivate_locked_super.
 */
void
cifs_sb_active(struct super_block *sb)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);

	/* the first activation of this cifs sb also pins the VFS sb */
	if (atomic_inc_return(&cifs_sb->active) == 1)
		atomic_inc(&sb->s_active);
}
/*
 * NOTE(review): interior fragment of cifs_read_super() — the function header
 * and earlier statements were lost in extraction; the code below is unchanged.
 *
 * Some very old servers like DOS and OS/2 used 2 second granularity
 * (while all current servers use 100ns granularity - see MS-DTYP)
 * but 1 second is the maximum allowed granularity for the VFS
 * so for old servers set time granularity to 1 second while for
 * everything else (current servers) set it to 100ns.
 */
	if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
	    ((tcon->ses->capabilities &
	      tcon->ses->server->vals->cap_nt_find) == 0) &&
	    !tcon->unix_ext) {
		sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
		/* DOS-date limits of the legacy protocol bound min/max times */
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
		sb->s_time_min = ts.tv_sec;
		ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
				    cpu_to_le16(SMB_TIME_MAX), 0);
		sb->s_time_max = ts.tv_sec;
	} else {
		/*
		 * Almost every server, including all SMB2+, uses DCE TIME
		 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
		 */
		sb->s_time_gran = 100;
		ts = cifs_NTtimeToUnix(0);
		sb->s_time_min = ts.tv_sec;
		ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
		sb->s_time_max = ts.tv_sec;
	}

	sb->s_magic = CIFS_SUPER_MAGIC;
	sb->s_op = &cifs_super_ops;
	sb->s_xattr = cifs_xattr_handlers;
	rc = super_setup_bdi(sb);
	if (rc)
		goto out_no_root;
	/* tune readahead according to rsize if readahead size not set on mount */
	if (cifs_sb->ctx->rsize == 0)
		cifs_sb->ctx->rsize =
			tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
	if (cifs_sb->ctx->rasize)
		sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
	else
		sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
/*
 * NOTE(review): fragment of cifs_statfs() — surrounding lines were lost in
 * extraction; the code below is unchanged.
 */
	buf->f_fsid.val[0] = tcon->vol_serial_number;
	/* are using part of create time for more randomness, see man statfs */
	buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
/*
 * NOTE(review): tail of cifs_permission() — the function header was lost in
 * extraction; the code below is unchanged.
 */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
		if ((mask & MAY_EXEC) && !execute_ok(inode))
			return -EACCES;
		else
			return 0;
	} else
		/*
		 * file mode might have been restricted at mount time on the
		 * client (above and beyond ACL on servers) for servers which
		 * do not support setting and viewing mode bits, so allowing
		 * client to check permissions is useful
		 */
		return generic_permission(&nop_mnt_idmap, inode, mask);
}
staticstruct inode *
cifs_alloc_inode(struct super_block *sb)
{ struct cifsInodeInfo *cifs_inode;
cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL); if (!cifs_inode) return NULL;
cifs_inode->cifsAttrs = ATTR_ARCHIVE; /* default */
cifs_inode->time = 0; /* * Until the file is open and we have gotten oplock info back from the * server, can not assume caching of file data or metadata.
*/
cifs_set_oplock_level(cifs_inode, 0);
cifs_inode->lease_granted = false;
cifs_inode->flags = 0;
spin_lock_init(&cifs_inode->writers_lock);
cifs_inode->writers = 0;
cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
cifs_inode->netfs.remote_i_size = 0;
cifs_inode->uniqueid = 0;
cifs_inode->createtime = 0;
cifs_inode->epoch = 0;
spin_lock_init(&cifs_inode->open_file_lock);
generate_random_uuid(cifs_inode->lease_key);
cifs_inode->symlink_target = NULL;
/* * Can not set i_flags here - they get immediately overwritten to zero * by the VFS.
*/ /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
INIT_LIST_HEAD(&cifs_inode->openFileList);
INIT_LIST_HEAD(&cifs_inode->llist);
INIT_LIST_HEAD(&cifs_inode->deferred_closes);
spin_lock_init(&cifs_inode->deferred_lock); return &cifs_inode->netfs.inode;
}
/*
 * cifs_show_devname() is used so we show the mount device name with correct
 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
 */
static int cifs_show_devname(struct seq_file *m, struct dentry *root)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
	char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);

	if (devname == NULL)
		seq_puts(m, "none"); /* kstrdup failed (or no source) */
	else {
		convert_delimiter(devname, '/');
		/* escape all spaces in share names */
		seq_escape(m, devname, " \t");
		kfree(devname);
	}
	return 0;
}
/* * cifs_show_options() is for displaying mount options in /proc/mounts. * Not all settable options are displayed but most of the important * ones are.
*/ staticint
cifs_show_options(struct seq_file *s, struct dentry *root)
{ struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb); struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); struct sockaddr *srcaddr;
srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
if (!tcon->unix_ext)
seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
cifs_sb->ctx->file_mode,
cifs_sb->ctx->dir_mode); if (cifs_sb->ctx->iocharset)
seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset); if (tcon->ses->unicode == 0)
seq_puts(s, ",nounicode"); elseif (tcon->ses->unicode == 1)
seq_puts(s, ",unicode"); if (tcon->seal)
seq_puts(s, ",seal"); elseif (tcon->ses->server->ignore_signature)
seq_puts(s, ",signloosely"); if (tcon->nocase)
seq_puts(s, ",nocase"); if (tcon->nodelete)
seq_puts(s, ",nodelete"); if (cifs_sb->ctx->no_sparse)
seq_puts(s, ",nosparse"); if (tcon->local_lease)
seq_puts(s, ",locallease"); if (tcon->retry)
seq_puts(s, ",hard"); else
seq_puts(s, ",soft"); if (tcon->use_persistent)
seq_puts(s, ",persistenthandles"); elseif (tcon->use_resilient)
seq_puts(s, ",resilienthandles"); if (tcon->posix_extensions)
seq_puts(s, ",posix"); elseif (tcon->unix_ext)
seq_puts(s, ",unix"); else
seq_puts(s, ",nounix"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
seq_puts(s, ",nodfs"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
seq_puts(s, ",posixpaths"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
seq_puts(s, ",setuids"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
seq_puts(s, ",idsfromsid"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
seq_puts(s, ",serverino"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
seq_puts(s, ",rwpidforward"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
seq_puts(s, ",forcemand"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
seq_puts(s, ",nouser_xattr"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
seq_puts(s, ",mapchars"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
seq_puts(s, ",mapposix"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
seq_puts(s, ",sfu"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
seq_puts(s, ",nobrl"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
seq_puts(s, ",nohandlecache"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
seq_puts(s, ",modefromsid"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
seq_puts(s, ",cifsacl"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
seq_puts(s, ",dynperm"); if (root->d_sb->s_flags & SB_POSIXACL)
seq_puts(s, ",acl"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
seq_puts(s, ",mfsymlinks"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
seq_puts(s, ",fsc"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
seq_puts(s, ",nostrictsync"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
seq_puts(s, ",noperm"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
seq_printf(s, ",backupuid=%u",
from_kuid_munged(&init_user_ns,
cifs_sb->ctx->backupuid)); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
seq_printf(s, ",backupgid=%u",
from_kgid_munged(&init_user_ns,
cifs_sb->ctx->backupgid));
seq_show_option(s, "reparse",
cifs_reparse_type_str(cifs_sb->ctx->reparse_type)); if (cifs_sb->ctx->nonativesocket)
seq_puts(s, ",nonativesocket"); else
seq_puts(s, ",nativesocket");
seq_show_option(s, "symlink",
cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));
seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize); if (cifs_sb->ctx->rasize)
seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize); if (tcon->ses->server->min_offload)
seq_printf(s, ",esize=%u", tcon->ses->server->min_offload); if (tcon->ses->server->retrans)
seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
seq_printf(s, ",echo_interval=%lu",
tcon->ses->server->echo_interval / HZ);
/* Only display the following if overridden on mount */ if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits); if (tcon->ses->server->tcp_nodelay)
seq_puts(s, ",tcpnodelay"); if (tcon->ses->server->noautotune)
seq_puts(s, ",noautotune"); if (tcon->ses->server->noblocksnd)
seq_puts(s, ",noblocksend"); if (tcon->ses->server->nosharesock)
seq_puts(s, ",nosharesock");
if (tcon->snapshot_time)
seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); if (tcon->handle_timeout)
seq_printf(s, ",handletimeout=%u", tcon->handle_timeout); if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
/* * Display file and directory attribute timeout in seconds. * If file and directory attribute timeout the same then actimeo * was likely specified on mount
*/ if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ); else {
seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
}
seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
if (tcon->ses->chan_max > 1)
seq_printf(s, ",multichannel,max_channels=%zu",
tcon->ses->chan_max);
/*
 * NOTE(review): interior of cifs_umount_begin() — the function header, local
 * declarations and closing brace were lost in extraction; code unchanged.
 */
	spin_lock(&cifs_tcp_ses_lock);
	spin_lock(&tcon->tc_lock);
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
			    netfs_trace_tcon_ref_see_umount);
	if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
		/* we have other mounts to same share or we have
		   already tried to umount this and woken up
		   all waiting network requests, nothing to do */
		spin_unlock(&tcon->tc_lock);
		spin_unlock(&cifs_tcp_ses_lock);
		return;
	}
	/*
	 * can not set tcon->status to TID_EXITING yet since we don't know if
	 * umount -f will fail later (e.g. due to open files). TID_EXITING
	 * will be set just before tdis req sent
	 */
	spin_unlock(&tcon->tc_lock);
	spin_unlock(&cifs_tcp_ses_lock);

	cifs_close_all_deferred_files(tcon);
	/* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
	/* cancel_notify_requests(tcon); */
	if (tcon->ses && tcon->ses->server) {
		cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
		wake_up_all(&tcon->ses->server->request_q);
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1); /* yield */
		/* we have to kick the requests once more */
		wake_up_all(&tcon->ses->server->response_q);
		msleep(1);
	}
staticconststruct super_operations cifs_super_ops = {
.statfs = cifs_statfs,
.alloc_inode = cifs_alloc_inode,
.write_inode = cifs_write_inode,
.free_inode = cifs_free_inode,
.drop_inode = cifs_drop_inode,
.evict_inode = cifs_evict_inode, /* .show_path = cifs_show_path, */ /* Would we ever need show path? */
.show_devname = cifs_show_devname, /* .delete_inode = cifs_delete_inode, */ /* Do not need above
function unless later we add lazy close of inodes or unless the
kernel forgets to call us with the same number of releases (closes)
as opens */
.show_options = cifs_show_options,
.umount_begin = cifs_umount_begin,
.freeze_fs = cifs_freeze, #ifdef CONFIG_CIFS_STATS2
.show_stats = cifs_show_stats, #endif
};
/* * Get root dentry from superblock according to prefix path mount option. * Return dentry with refcount + 1 on success and NULL otherwise.
*/ staticstruct dentry *
cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
{ struct dentry *dentry; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); char *full_path = NULL; char *s, *p; char sep;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) return dget(sb->s_root);
/*
 * NOTE(review): interior of the llseek implementation — header and tail were
 * lost in extraction; code below is unchanged.
 *
 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
 * the cached file length
 */
	if (whence != SEEK_SET && whence != SEEK_CUR) {
		int rc;
		struct inode *inode = file_inode(file);

		/*
		 * We need to be sure that all dirty pages are written and the
		 * server has the newest file length.
		 */
		if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
		    inode->i_mapping->nrpages != 0) {
			rc = filemap_fdatawait(inode->i_mapping);
			if (rc) {
				mapping_set_error(inode->i_mapping, rc);
				return rc;
			}
		}
		/*
		 * Some applications poll for the file length in this strange
		 * way so we must seek to end on non-oplocked files by
		 * setting the revalidate time to zero.
		 */
		CIFS_I(inode)->time = 0;
/*
 * Grant a local lease only when it cannot conflict with server state:
 * either we hold a matching oplock/lease level, or the user asserted
 * (via the local_lease mount option) that the file won't change remotely.
 */
static int
cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv)
{
	/*
	 * Note that this is called by vfs setlease with i_lock held to
	 * protect *lease from going away.
	 */
	struct inode *inode = file_inode(file);
	struct cifsFileInfo *cfile = file->private_data;

	/* Check if file is oplocked if this is request for new lease */
	if (arg == F_UNLCK ||
	    ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
	    ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
		return generic_setlease(file, arg, lease, priv);
	else if (tlink_tcon(cfile->tlink)->local_lease &&
		 !CIFS_CACHE_READ(CIFS_I(inode)))
		/*
		 * If the server claims to support oplock on this file, then we
		 * still need to check oplock even if the local_lease mount
		 * option is set, but there are servers which do not support
		 * oplock for which this mount option may be useful if the user
		 * knows that the file won't be changed on the server by anyone
		 * else.
		 */
		return generic_setlease(file, arg, lease, priv);
	else
		return -EAGAIN;
}
/*
 * Flush out either the folio that overlaps the beginning of a range in which
 * pos resides or the folio that overlaps the end of a range unless that folio
 * is entirely within the range we're going to invalidate. We extend the flush
 * bounds to encompass the folio.
 *
 * NOTE(review): truncated by extraction — the remainder of the function body
 * is missing from this chunk; only fused keywords restored.
 */
static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart,
			    loff_t *_fend, bool first)
{
	struct folio *folio;
	unsigned long long fpos, fend;
	pgoff_t index = pos / PAGE_SIZE;
	size_t size;
	int rc = 0;

	/* no folio cached at pos => nothing to flush */
	folio = filemap_get_folio(inode->i_mapping, index);
	if (IS_ERR(folio))
		return 0;
/*
 * NOTE(review): interior of the remap/clone-range implementation — the
 * function header and opening statements were lost in extraction; only the
 * fused `elseif` keywords have been restored, logic is unchanged.
 *
 * Note: cifs case is easier than btrfs since server responsible for
 * checks for proper open modes and file type and if it wants
 * server could even support copy of range where source = target
 */
	lock_two_nondirectories(target_inode, src_inode);

	if (len == 0)
		len = src_inode->i_size - off;

	cifs_dbg(FYI, "clone range\n");

	/* Flush the source buffer */
	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush the folios at either end of the destination range to prevent
	 * accidental loss of dirty data outside of the range.
	 */
	fstart = destoff;
	fend = destend;

	/* Discard all the folios that overlap the destination region. */
	cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);

	rc = -EOPNOTSUPP;
	if (target_tcon->ses->server->ops->duplicate_extents) {
		rc = target_tcon->ses->server->ops->duplicate_extents(xid,
			smb_file_src, smb_file_target, off, len, destoff);
		if (rc == 0 && new_size > old_size) {
			truncate_setsize(target_inode, new_size);
			fscache_resize_cookie(cifs_inode_cookie(target_inode),
					      new_size);
		} else if (rc == -EOPNOTSUPP) {
			/*
			 * copy_file_range syscall man page indicates EINVAL
			 * is returned e.g when "fd_in and fd_out refer to the
			 * same file and the source and target ranges overlap."
			 * Test generic/157 was what showed these cases where
			 * we need to remap EOPNOTSUPP to EINVAL
			 */
			if (off >= src_inode->i_size) {
				rc = -EINVAL;
			} else if (src_inode == target_inode) {
				if (off + len > destoff)
					rc = -EINVAL;
			}
		}
		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
			target_cifsi->netfs.zero_point = new_size;
	}

	/* force revalidate of size and timestamps of target file now
	   that target is updated on the server */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	   strictly necessary here it is a little cleaner to be consistent */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	free_xid(xid);
	return rc < 0 ? rc : len;
}
/*
 * NOTE(review): interior of the copychunk-range implementation — the function
 * header and opening statements were lost in extraction; code unchanged.
 */
	if (src_tcon->ses != target_tcon->ses) {
		cifs_dbg(FYI, "source and target of copy not on same server\n");
		goto out;
	}

	rc = -EOPNOTSUPP;
	if (!target_tcon->ses->server->ops->copychunk_range)
		goto out;

	/*
	 * Note: cifs case is easier than btrfs since server responsible for
	 * checks for proper open modes and file type and if it wants
	 * server could even support copy of range where source = target
	 */
	lock_two_nondirectories(target_inode, src_inode);

	cifs_dbg(FYI, "about to flush pages\n");

	rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
					  off + len - 1);
	if (rc)
		goto unlock;

	/* The server-side copy will fail if the source crosses the EOF marker.
	 * Advance the EOF marker after the flush above to the end of the range
	 * if it's short of that.
	 */
	if (src_cifsi->netfs.remote_i_size < off + len) {
		rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
		if (rc < 0)
			goto unlock;
	}

	/* Flush and invalidate all the folios in the destination region. If
	 * the copy was successful, then some of the flush is extra overhead,
	 * but we need to allow for the copy failing in some way (eg. ENOSPC).
	 */
	rc = filemap_invalidate_inode(target_inode, true, destoff, destoff + len - 1);
	if (rc)
		goto unlock;

	/* force revalidate of size and timestamps of target file now
	 * that target is updated on the server
	 */
	CIFS_I(target_inode)->time = 0;
unlock:
	/* although unlocking in the reverse order from locking is not
	 * strictly necessary here it is a little cleaner to be consistent
	 */
	unlock_two_nondirectories(src_inode, target_inode);
out:
	return rc;
}
/*
 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
 * is a dummy operation.
 */
static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
		 file, datasync);

	/* nothing to flush — directory metadata already went to the server */
	return 0;
}
/* Tear down the cifs inode slab cache at module unload. */
static void
cifs_destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(cifs_inode_cachep);
}
staticint
cifs_init_request_bufs(void)
{ /* * SMB2 maximum header size is bigger than CIFS one - no problems to * allocate some more bytes for CIFS.
*/
size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
if (CIFSMaxBufSize < 8192) { /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
Unicode path name has to fit in any SMB/CIFS path based frames */
CIFSMaxBufSize = 8192;
} elseif (CIFSMaxBufSize > 1024*127) {
CIFSMaxBufSize = 1024 * 127;
} else {
CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
} /* cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n", CIFSMaxBufSize, CIFSMaxBufSize);
*/
cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
CIFSMaxBufSize + max_hdr_size, 0,
SLAB_HWCACHE_ALIGN, 0,
CIFSMaxBufSize + max_hdr_size,
NULL); if (cifs_req_cachep == NULL) return -ENOMEM;
if (cifs_min_rcv < 1)
cifs_min_rcv = 1; elseif (cifs_min_rcv > 64) {
cifs_min_rcv = 64;
cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
}
if (cifs_req_poolp == NULL) {
kmem_cache_destroy(cifs_req_cachep); return -ENOMEM;
} /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and almost all handle based requests (but not write response, nor is it sufficient for path based requests). A smaller size would have been more efficient (compacting multiple slab items on one 4k page) for the case in which debug was on, but this larger size allows more SMBs to use small buffer alloc and is still much more efficient to alloc 1 per page off the slab compared to 17K (5page)
alloc of large cifs buffers even when page debugging is on */
cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL); if (cifs_sm_req_cachep == NULL) {
mempool_destroy(cifs_req_poolp);
kmem_cache_destroy(cifs_req_cachep); return -ENOMEM;
}
if (cifs_min_small < 2)
cifs_min_small = 2; elseif (cifs_min_small > 256) {
cifs_min_small = 256;
cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
}
/*
 * NOTE(review): fragment of the mid (multiplexed request id) cache setup —
 * surrounding lines were lost in extraction; code below is unchanged.
 */
	/* 3 is a reasonable minimum number of simultaneous operations */
	cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
	if (cifs_mid_poolp == NULL) {
		kmem_cache_destroy(cifs_mid_cachep);
		return -ENOMEM;
	}
if (cifs_max_pending < 2) {
cifs_max_pending = 2;
cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
} elseif (cifs_max_pending > CIFS_MAX_REQ) {
cifs_max_pending = CIFS_MAX_REQ;
cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
CIFS_MAX_REQ);
}
/* Limit max to about 18 hours, and setting to zero disables directory entry caching */ if (dir_cache_timeout > 65000) {
dir_cache_timeout = 65000;
cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
}
/* * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3) * so that we don't launch too many worker threads but * Documentation/core-api/workqueue.rst recommends setting it to 0
*/
/* WQ_UNBOUND allows decrypt tasks to run on any CPU */
decrypt_wq = alloc_workqueue("smb3decryptd",
WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0); if (!decrypt_wq) {
rc = -ENOMEM; goto out_destroy_cifsiod_wq;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.