/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 * @privport: port is privileged
 */
struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
	bool privport;
};
/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */

enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
	/* Options that take no arguments */
	Opt_privport,
};
/**
 * struct p9_conn - fd mux connection state information
 * @mux_list: list link for mux to manage multiple connections (?)
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_lock: lock protecting req_list and requests statuses
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @rreq: read request
 * @wreq: write request
 * @tmp_buf: temporary buffer to read in header
 * @rc: temporary fcall for reading current frame
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: pending links to be polled per conn
 * @poll_wait: array of wait_q's for various worker threads
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: ????
 */
/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference of file to write to
 * @conn: connection state reference
 */
/* NOTE(review): fragment — interior of the read worker plus worker
 * re-scheduling tails; 'm', 'err', 'n' and 'flags' are declared above
 * this chunk, and the first statements below sit inside an 'if' opened
 * before it (hence the unmatched '}' after the memcpy()).
 * Fix applied: 'elseif' is not valid C; corrected to 'else if'.
 */
	if (m->rc.size > m->rreq->rc.capacity) {
		p9_debug(P9_DEBUG_ERROR,
			 "requested packet size too big: %d for tag %d with capacity %zd\n",
			 m->rc.size, m->rc.tag, m->rreq->rc.capacity);
		err = -EIO;
		goto error;
	}

	if (!m->rreq->rc.sdata) {
		p9_debug(P9_DEBUG_ERROR,
			 "No recv fcall for tag %d (req %p), disconnecting!\n",
			 m->rc.tag, m->rreq);
		p9_req_put(m->client, m->rreq);
		m->rreq = NULL;
		err = -EIO;
		goto error;
	}

	m->rc.sdata = m->rreq->rc.sdata;
	memcpy(m->rc.sdata, m->tmp_buf, m->rc.capacity);
	m->rc.capacity = m->rc.size;
	}

	/* packet is read in
	 * not an else because some packets (like clunk) have no payload
	 */
	if ((m->rreq) && (m->rc.offset == m->rc.capacity)) {
		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
		m->rreq->rc.size = m->rc.offset;
		spin_lock(&m->req_lock);
		if (m->rreq->status == REQ_STATUS_SENT) {
			list_del(&m->rreq->req_list);
			p9_client_cb(m->client, m->rreq, REQ_STATUS_RCVD);
		} else if (m->rreq->status == REQ_STATUS_FLSHD) {
			/* Ignore replies associated with a cancelled request. */
			p9_debug(P9_DEBUG_TRANS,
				 "Ignore replies associated with a cancelled request\n");
		} else {
			spin_unlock(&m->req_lock);
			p9_debug(P9_DEBUG_ERROR,
				 "Request tag %d errored out while we were reading the reply\n",
				 m->rc.tag);
			err = -EIO;
			goto error;
		}
		spin_unlock(&m->req_lock);
		m->rc.sdata = NULL;
		m->rc.offset = 0;
		m->rc.capacity = 0;
		p9_req_put(m->client, m->rreq);
		m->rreq = NULL;
	}

end_clear:
	clear_bit(Rworksched, &m->wsched);

	/* Requests still awaiting replies: re-arm the read worker if the
	 * fd is (or was flagged as) readable.
	 */
	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = EPOLLIN;
		else
			n = p9_fd_poll(m->client, NULL, NULL);

		if ((n & EPOLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
		}
	}

	/* Partially written frame or unsent requests: re-arm the write
	 * worker if the fd is (or was flagged as) writable.
	 */
	if (m->wsize || !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = EPOLLOUT;
		else
			n = p9_fd_poll(m->client, NULL, NULL);

		if ((n & EPOLLOUT) &&
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
		}
	}

	/* Queue this connection for the global poll worker (once). */
	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	schedule_work(&p9_poll_work);
	return 1;
}
/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by files poll operation to add v9fs-poll task to files wait queue
 */
/**
 * p9_conn_create - initialize the per-session mux data
 * @client: client instance
 *
 * Note: Creates the polling task if this is the first session.
 */
/* NOTE(review): fragment — tail of the poll dispatcher; 'm', 'n' and
 * 'err' are declared above this chunk.
 */
n = p9_fd_poll(m->client, NULL, &err); if (n & (EPOLLERR | EPOLLHUP | EPOLLNVAL)) {
p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
/* Fatal poll condition: cancel everything on this connection. */
p9_conn_cancel(m, err);
}
/* Readable: remember the event and kick the read worker unless one is
 * already scheduled.
 */
if (n & EPOLLIN) {
set_bit(Rpending, &m->wsched);
p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); if (!test_and_set_bit(Rworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
schedule_work(&m->rq);
}
}
/* Writable: only schedule the write worker when there is actually a
 * partial frame (wsize) or unsent requests to push.
 */
if (n & EPOLLOUT) {
set_bit(Wpending, &m->wsched);
p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
!test_and_set_bit(Wworksched, &m->wsched)) {
p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
schedule_work(&m->wq);
}
}
}
/**
 * p9_fd_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully.
 *
 * @client: client instance
 * @req: request to be sent
 */
/* NOTE(review): fragment — body of the request-cancelled path; 'm' and
 * 'req' come from the enclosing function, which is not visible here.
 */
spin_lock(&m->req_lock); /* Ignore cancelled request if status changed since the request was * processed in p9_client_flush()
*/ if (req->status != REQ_STATUS_SENT) {
spin_unlock(&m->req_lock); return 0;
}
/* we haven't received a response for oldreq, * remove it from the list.
*/
list_del(&req->req_list);
/* Mark it flushed so a late reply is dropped by the read worker. */
WRITE_ONCE(req->status, REQ_STATUS_FLSHD);
spin_unlock(&m->req_lock);
/**
 * parse_opts - parse mount options into p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */
/* NOTE(review): fragment — interior of parse_opts(); 'tmp_options',
 * 'options', 'p', 'option', 'args' and the 'tokens' table are declared
 * above this chunk.
 */
tmp_options = kstrdup(params, GFP_KERNEL); if (!tmp_options) {
p9_debug(P9_DEBUG_ERROR, "failed to allocate copy of option string\n"); return -ENOMEM;
}
options = tmp_options;
/* Walk the comma-separated option list; malformed or unknown options
 * are logged and skipped ('continue') rather than failing the parse.
 */
while ((p = strsep(&options, ",")) != NULL) { int token; int r; if (!*p) continue;
token = match_token(p, tokens, args); if ((token != Opt_err) && (token != Opt_privport)) {
/* Every recognised option other than privport takes an integer. */
r = match_int(&args[0], &option); if (r < 0) {
p9_debug(P9_DEBUG_ERROR, "integer field, but no integer?\n"); continue;
}
} switch (token) { case Opt_port:
opts->port = option; break; case Opt_rfdno:
opts->rfd = option; break; case Opt_wfdno:
opts->wfd = option; break; case Opt_privport:
opts->privport = true; break; default: continue;
}
}
kfree(tmp_options); return 0;
}
/* NOTE(review): fragment — p9_fd_open() continues past the end of this
 * chunk (the out_free_ts/out_put_rd/out_put_wr labels and the success
 * path are not visible here). Takes references to the read/write fds
 * and marks them non-blocking.
 * Fix applied: 'staticint' was a typo; corrected to 'static int'.
 */
static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
	struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
					 GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->rd = fget(rfd);
	if (!ts->rd)
		goto out_free_ts;
	if (!(ts->rd->f_mode & FMODE_READ))
		goto out_put_rd;
	/* Prevent workers from hanging on IO when fd is a pipe.
	 * It's technically possible for userspace or concurrent mounts to
	 * modify this flag concurrently, which will likely result in a
	 * broken filesystem. However, just having bad flags here should
	 * not crash the kernel or cause any other sort of bug, so mark this
	 * particular data race as intentional so that tooling (like KCSAN)
	 * can allow it and detect further problems.
	 */
	data_race(ts->rd->f_flags |= O_NONBLOCK);
	ts->wr = fget(wfd);
	if (!ts->wr)
		goto out_put_rd;
	if (!(ts->wr->f_mode & FMODE_WRITE))
		goto out_put_wr;
	data_race(ts->wr->f_flags |= O_NONBLOCK);
/**
 * p9_poll_workfn - poll worker thread
 * @work: work queue
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 */
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.