list_for_each_entry(cfid, &cfids->entries, entry) { if (!strcmp(cfid->path, path)) { /* * If it doesn't have a lease it is either not yet * fully cached or it may be in the process of * being deleted due to a lease break.
*/ if (!cfid->time || !cfid->has_lease) { return NULL;
}
kref_get(&cfid->refcount); return cfid;
}
} if (lookup_only) { return NULL;
} if (cfids->num_entries >= max_cached_dirs) { return NULL;
}
cfid = init_cached_dir(path); if (cfid == NULL) { return NULL;
}
cfid->cfids = cfids;
cfids->num_entries++;
list_add(&cfid->entry, &cfids->entries);
cfid->on_list = true;
kref_get(&cfid->refcount); /* * Set @cfid->has_lease to true during construction so that the lease * reference can be put in cached_dir_lease_break() due to a potential * lease break right after the request is sent or while @cfid is still * being cached, or if a reconnection is triggered during construction. * Concurrent processes won't be to use it yet due to @cfid->time being * zero.
*/
cfid->has_lease = true;
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
cifs_sb->prepath) {
len = strlen(cifs_sb->prepath) + 1; if (unlikely(len > strlen(path))) return ERR_PTR(-EINVAL);
} return path + len;
}
/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 *
 * NOTE(review): this body appears to be a corrupted extraction of the
 * upstream function - the first declarator types are fused
 * ("unsignedint", "constchar"), the compound SMB2 create/query-info
 * exchange that should follow the prefix-path handling is missing, the
 * cfid_list_lock is unlocked twice without an intervening lock, the tail
 * after "done:" references identifiers (tmp_list, q, entry) that are
 * never declared here, and the function's closing brace is absent.
 * Restore the real implementation from upstream
 * fs/smb/client/cached_dir.c; the comments below only annotate the
 * visible text.
 */
int open_cached_dir(unsignedint xid, struct cifs_tcon *tcon, constchar *path,
		    struct cifs_sb_info *cifs_sb, bool lookup_only,
		    struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	constchar *npath;
	int retries = 0, cur_sleep = 1;
	__le32 lease_flags = 0;

	/* Caching requires a valid superblock root dentry. */
	if (cifs_sb->root == NULL)
		return -ENOENT;
	if (tcon == NULL)
		return -EOPNOTSUPP;
	ses = tcon->ses;
	cfids = tcon->cfids;
	if (cfids == NULL)
		return -EOPNOTSUPP;
replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);
	/* Directory caching depends on the server providing lease keys. */
	if (!server->ops->new_lease_key)
		return -EIO;
	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;
	spin_lock(&cfids->cfid_list_lock);
	/* Look up an existing entry for @path or allocate a fresh one. */
	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or laundromat worker removed it
	 * from @cfids->entries. Caller will put last reference if the latter.
	 */
	if (cfid->has_lease && cfid->time) {
		cfid->last_access_time = jiffies;
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);
	pfid = &cfid->fid;
	/*
	 * Skip any prefix paths in @path as lookup_noperm_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry(). Also, do it earlier as we might reconnect
	 * below when trying to send compounded request and then potentially
	 * having a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}
	/*
	 * We do not hold the lock for the open because in case
	 * SMB2_open needs to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;
	/*
	 * NOTE(review): the compound open/query-info request that should
	 * populate rqst[]/rsp_iov[] is missing between here and the
	 * timestamp update below.
	 */
	cfid->time = jiffies;
	cfid->last_access_time = jiffies;
	/*
	 * NOTE(review): unlock without a matching lock - the list lock was
	 * already dropped above.
	 */
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;
oshr_free:
	/* Free the compound request and response buffers. */
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
	if (rc) {
		/* On failure, unlink the entry and drop the lease reference. */
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point. One for the caller and one for a potential
			 * lease. Release one here, and the second below.
			 */
			cfid->has_lease = false;
			close_cached_dir(cfid);
		}
		spin_unlock(&cfids->cfid_list_lock);
		/*
		 * NOTE(review): duplicate unlink/unlock sequence below looks
		 * spliced in from another function (the lock is not held and
		 * the entry was already unlinked above).
		 */
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfid->cfids->num_entries--;
		}
		spin_unlock(&cfid->cfids->cfid_list_lock);
		dput(cfid->dentry);
		cfid->dentry = NULL;
		if (cfid->is_open) {
			rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
					cfid->fid.volatile_fid);
			if (rc)
				/* should we retry on -EBUSY or -EAGAIN? */
				cifs_dbg(VFS, "close cached dir rc %d\n", rc);
		}
done:
		/* NOTE(review): tmp_list, q and entry are not declared here. */
		list_for_each_entry_safe(tmp_list, q, &entry, entry) {
			list_del(&tmp_list->entry);
			dput(tmp_list->dentry);
			kfree(tmp_list);
		}
		/* Flush any pending work that will drop dentries */
		flush_workqueue(cfid_put_wq);
	}
/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	/* Nothing to do if this tcon never had a cached-dir list. */
	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying list.
	 * They'll be cleaned up later by cfids_invalidation_worker. Take
	 * a reference to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference: clearing has_lease hands
			 * the lease's refcount to the dying-list entry.
			 */
			cfid->has_lease = false;
		} else
			kref_get(&cfid->refcount);
	}
	/*
	 * Queue dropping of the dentries once locks have been dropped
	 */
	if (!list_empty(&cfids->dying))
		queue_work(cfid_put_wq, &cfids->invalidation_work);
	spin_unlock(&cfids->cfid_list_lock);
}
/*
 * Release the cached directory's dentry, and then queue work to drop the
 * cached directory itself (closing on server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 *
 * NOTE(review): this body looks like a corrupted splice of several
 * routines - it scans for a matching lease key (cached_dir_lease_break
 * logic), drains a cfids->dying list (invalidation-worker logic) and
 * re-arms the laundromat work, while referencing identifiers (cfids,
 * lease_key, q, entry) that are never declared here and reading `dentry`
 * uninitialized. Also note the fused "staticvoid" keyword. Restore the
 * real implementation from upstream fs/smb/client/cached_dir.c; the
 * comments below only annotate what the visible text appears to attempt.
 */
staticvoid cached_dir_put_work(struct work_struct *work)
{
	/* Recover the cached_fid that embeds this work item. */
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	struct dentry *dentry;

	/* NOTE(review): `cfids` is not in scope in this function. */
	spin_lock(&cfids->cfid_list_lock);
	/* Look for the cached handle whose lease matches `lease_key`. */
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list
			 * so no threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
			/*
			 * NOTE(review): nested acquisition of a spinlock that
			 * is already held above - would self-deadlock.
			 */
			spin_lock(&cfids->cfid_list_lock);
			/* move cfids->dying to the local list */
			list_cut_before(&entry, &cfids->dying, &cfids->dying);
			spin_unlock(&cfids->cfid_list_lock);
			list_for_each_entry_safe(cfid, q, &entry, entry) {
				list_del(&cfid->entry);
				/* Drop the ref-count acquired in invalidate_all_cached_dirs */
				kref_put(&cfid->refcount, smb2_close_cached_fid);
			}
		}
		/* NOTE(review): `dentry` has not been assigned at this point. */
		dput(dentry);
		if (cfid->is_open) {
			/* Pin the tcon so the queued close work can use it. */
			spin_lock(&cifs_tcp_ses_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cifs_tcp_ses_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else
			/*
			 * Drop the ref-count from above, either the lease-ref (if there
			 * was one) or the extra one acquired.
			 */
			close_cached_dir(cfid);
	}
	/* Re-arm the periodic laundromat scan. */
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}
/* * Called from tconInfoFree when we are tearing down the tcon. * There are no active users or open files/directories at this point.
*/ void free_cached_dirs(struct cached_fids *cfids)
{ struct cached_fid *cfid, *q;
LIST_HEAD(entry);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.