/** * struct tb_ctl - Thunderbolt control channel * @nhi: Pointer to the NHI structure * @tx: Transmit ring * @rx: Receive ring * @frame_pool: DMA pool for control messages * @rx_packets: Received control messages * @request_queue_lock: Lock protecting @request_queue * @request_queue: List of outstanding requests * @running: Is the control channel running at the moment * @timeout_msec: Default timeout for non-raw control messages * @callback: Callback called when hotplug message is received * @callback_data: Data passed to @callback * @index: Domain number. This will be output with the trace record.
*/ struct tb_ctl { struct tb_nhi *nhi; struct tb_ring *tx; struct tb_ring *rx;
/**
 * tb_cfg_request_alloc() - Allocates a new config request
 *
 * This is refcounted object so when you are done with this, call
 * tb_cfg_request_put() to it.
 */
struct tb_cfg_request *tb_cfg_request_alloc(void)
{
	struct tb_cfg_request *req = kzalloc(sizeof(*req), GFP_KERNEL);

	/* Initial reference belongs to the caller */
	if (req)
		kref_init(&req->kref);

	return req;
}
/**
 * tb_cfg_request_get() - Increase refcount of a request
 * @req: Request whose refcount is increased
 */
void tb_cfg_request_get(struct tb_cfg_request *req)
{
	/* Serialized against tb_cfg_request_put() releasing the object */
	mutex_lock(&tb_cfg_request_lock);
	kref_get(&req->kref);
	mutex_unlock(&tb_cfg_request_lock);
}
/**
 * tb_cfg_request_put() - Decrease refcount and possibly release the request
 * @req: Request whose refcount is decreased
 *
 * Call this function when you are done with the request. When refcount
 * goes to %0 the object is released.
 */
void tb_cfg_request_put(struct tb_cfg_request *req)
{
	/* The lock keeps get/put ordering consistent across CPUs */
	mutex_lock(&tb_cfg_request_lock);
	kref_put(&req->kref, tb_cfg_request_destroy);
	mutex_unlock(&tb_cfg_request_lock);
}
/*
 * check_config_address() - Verify a reply's address against the request
 * @addr: Address fields taken from the received packet
 * @space: Config space the original request targeted
 * @offset: Dword offset the original request used
 * @length: Dword length the original request used
 *
 * Returns %0 when the reply address matches the request, %-EIO (after a
 * WARN) on any mismatch.
 *
 * Fixes: "staticint" token mash; newline was inside the closing paren of
 * the WARN format strings; the length check wrongly printed "wrong space".
 */
static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
		 space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
		 offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
		 length, addr.length))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}
if (pkg->frame.eof == TB_CFG_PKG_ERROR) return decode_error(pkg);
res.response_port = 0; /* will be updated later for cfg_read/write */
res.response_route = tb_cfg_get_route(header);
res.err = check_header(pkg, len, type, route); return res;
}
staticvoid tb_cfg_print_error(struct tb_ctl *ctl, enum tb_cfg_space space, conststruct tb_cfg_result *res)
{
WARN_ON(res->err != 1); switch (res->tb_error) { case TB_CFG_ERROR_PORT_NOT_CONNECTED: /* Port is not connected. This can happen during surprise
* removal. Do not warn. */ return; case TB_CFG_ERROR_INVALID_CONFIG_SPACE: /* * Invalid cfg_space/offset/length combination in * cfg_read/cfg_write.
*/
tb_ctl_dbg_once(ctl, "%llx:%x: invalid config space (%u) or offset\n",
res->response_route, res->response_port, space); return; case TB_CFG_ERROR_NO_SUCH_PORT: /* * - The route contains a non-existent port. * - The route contains a non-PHY port (e.g. PCIe). * - The port in cfg_read/cfg_write does not exist.
*/
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
res->response_route, res->response_port); return; case TB_CFG_ERROR_LOOP:
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
res->response_route, res->response_port); return; case TB_CFG_ERROR_LOCK:
tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n",
res->response_route, res->response_port); return; default: /* 5,6,7,9 and 11 are also valid error codes */
tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
res->response_route, res->response_port); return;
}
}
/*
 * tb_ctl_rx_submit() - Requeue a packet buffer on the RX ring
 * @pkg: Packet buffer to hand back to the hardware
 *
 * Fixes: "staticvoid" token mash.
 */
static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	tb_ring_rx(pkg->ctl->rx, &pkg->frame);
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
}
if (pkg->frame.eof != TB_CFG_PKG_ERROR) returnfalse;
switch (error->error) { case TB_CFG_ERROR_LINK_ERROR: case TB_CFG_ERROR_HEC_ERROR_DETECTED: case TB_CFG_ERROR_FLOW_CONTROL_ERROR: case TB_CFG_ERROR_DP_BW: case TB_CFG_ERROR_ROP_CMPLT: case TB_CFG_ERROR_POP_CMPLT: case TB_CFG_ERROR_PCIE_WAKE: case TB_CFG_ERROR_DP_CON_CHANGE: case TB_CFG_ERROR_DPTX_DISCOVERY: case TB_CFG_ERROR_LINK_RECOVERY: case TB_CFG_ERROR_ASYM_LINK: returntrue;
switch (frame->eof) { case TB_CFG_PKG_READ: case TB_CFG_PKG_WRITE: case TB_CFG_PKG_ERROR: case TB_CFG_PKG_OVERRIDE: case TB_CFG_PKG_RESET: if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
tb_ctl_err(pkg->ctl, "RX: checksum mismatch, dropping packet\n"); goto rx;
} if (tb_async_error(pkg)) {
tb_ctl_handle_event(pkg->ctl, frame->eof,
pkg, frame->size); goto rx;
} break;
case TB_CFG_PKG_EVENT: case TB_CFG_PKG_XDOMAIN_RESP: case TB_CFG_PKG_XDOMAIN_REQ: if (*(__be32 *)(pkg->buffer + frame->size) != crc32) {
tb_ctl_err(pkg->ctl, "RX: checksum mismatch, dropping packet\n"); goto rx;
}
fallthrough; case TB_CFG_PKG_ICM_EVENT: if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) goto rx; break;
default: break;
}
/* * The received packet will be processed only if there is an * active request and that the packet is what is expected. This * prevents packets such as replies coming after timeout has * triggered from messing with the active requests.
*/
req = tb_cfg_request_find(pkg->ctl, pkg);
/** * tb_cfg_request() - Start control request not waiting for it to complete * @ctl: Control channel to use * @req: Request to start * @callback: Callback called when the request is completed * @callback_data: Data to be passed to @callback * * This queues @req on the given control channel without waiting for it * to complete. When the request completes @callback is called.
*/ int tb_cfg_request(struct tb_ctl *ctl, struct tb_cfg_request *req, void (*callback)(void *), void *callback_data)
{ int ret;
/**
 * tb_cfg_request_cancel() - Cancel a control request
 * @req: Request to cancel
 * @err: Error to assign to the request
 *
 * This function can be used to cancel ongoing request. It will wait
 * until the request is not active anymore.
 */
void tb_cfg_request_cancel(struct tb_cfg_request *req, int err)
{
	/* Mark canceled first so the work handler sees the flag when run */
	set_bit(TB_CFG_REQUEST_CANCELED, &req->flags);
	schedule_work(&req->work);
	/* Block until the request is no longer active, then record why */
	wait_event(tb_cfg_request_cancel_queue, !tb_cfg_request_is_active(req));
	req->result.err = err;
}
/** * tb_cfg_request_sync() - Start control request and wait until it completes * @ctl: Control channel to use * @req: Request to start * @timeout_msec: Timeout how long to wait @req to complete * * Starts a control request and waits until it completes. If timeout * triggers the request is canceled before function returns. Note the * caller needs to make sure only one message for given switch is active * at a time.
*/ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl, struct tb_cfg_request *req, int timeout_msec)
{ unsignedlong timeout = msecs_to_jiffies(timeout_msec); struct tb_cfg_result res = { 0 };
DECLARE_COMPLETION_ONSTACK(done); int ret;
ret = tb_cfg_request(ctl, req, tb_cfg_request_complete, &done); if (ret) {
res.err = ret; return res;
}
if (!wait_for_completion_timeout(&done, timeout))
tb_cfg_request_cancel(req, -ETIMEDOUT);
flush_work(&req->work);
return req->result;
}
/* public interface, alloc/start/stop/free */
/** * tb_ctl_alloc() - allocate a control channel * @nhi: Pointer to NHI * @index: Domain number * @timeout_msec: Default timeout used with non-raw control messages * @cb: Callback called for plug events * @cb_data: Data passed to @cb * * cb will be invoked once for every hot plug event. * * Return: Returns a pointer on success or NULL on failure.
*/ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
event_cb cb, void *cb_data)
{ int i; struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); if (!ctl) return NULL;
for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl); if (!ctl->rx_packets[i]) goto err;
ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
}
/**
 * tb_ctl_free() - free a control channel
 * @ctl: Control channel to free
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int idx;

	if (!ctl)
		return;

	/* Rings go first, then the RX buffers that referenced them */
	if (ctl->rx)
		tb_ring_free(ctl->rx);
	if (ctl->tx)
		tb_ring_free(ctl->tx);

	for (idx = 0; idx < TB_CTL_RX_PKG_COUNT; idx++)
		tb_ctl_pkg_free(ctl->rx_packets[idx]);

	dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}
/**
 * tb_ctl_start() - start/resume the control channel
 * @ctl: Control channel to start
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int idx;

	tb_ctl_dbg(ctl, "control channel starting...\n");
	/* TX is used to ack hotplug packets, so it must come up first */
	tb_ring_start(ctl->tx);
	tb_ring_start(ctl->rx);
	for (idx = 0; idx < TB_CTL_RX_PKG_COUNT; idx++)
		tb_ctl_rx_submit(ctl->rx_packets[idx]);
	ctl->running = true;
}
/**
 * tb_ctl_stop() - pause the control channel
 * @ctl: Control channel to stop
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	/* Clear running under the lock so no new requests get queued */
	mutex_lock(&ctl->request_queue_lock);
	ctl->running = false;
	mutex_unlock(&ctl->request_queue_lock);

	tb_ring_stop(ctl->rx);
	tb_ring_stop(ctl->tx);

	/* By now all outstanding requests should have been drained */
	if (!list_empty(&ctl->request_queue))
		tb_ctl_WARN(ctl, "dangling request in request_queue\n");
	INIT_LIST_HEAD(&ctl->request_queue);
	tb_ctl_dbg(ctl, "control channel stopped\n");
}
/* public interface, commands */
/** * tb_cfg_ack_notification() - Ack notification * @ctl: Control channel to use * @route: Router that originated the event * @error: Pointer to the notification package * * Call this as response for non-plug notification to ack it. Returns * %0 on success or an error code on failure.
*/ int tb_cfg_ack_notification(struct tb_ctl *ctl, u64 route, conststruct cfg_error_pkg *error)
{ struct cfg_ack_pkg pkg = {
.header = tb_cfg_make_header(route),
}; constchar *name;
switch (error->error) { case TB_CFG_ERROR_LINK_ERROR:
name = "link error"; break; case TB_CFG_ERROR_HEC_ERROR_DETECTED:
name = "HEC error"; break; case TB_CFG_ERROR_FLOW_CONTROL_ERROR:
name = "flow control error"; break; case TB_CFG_ERROR_DP_BW:
name = "DP_BW"; break; case TB_CFG_ERROR_ROP_CMPLT:
name = "router operation completion"; break; case TB_CFG_ERROR_POP_CMPLT:
name = "port operation completion"; break; case TB_CFG_ERROR_PCIE_WAKE:
name = "PCIe wake"; break; case TB_CFG_ERROR_DP_CON_CHANGE:
name = "DP connector change"; break; case TB_CFG_ERROR_DPTX_DISCOVERY:
name = "DPTX discovery"; break; case TB_CFG_ERROR_LINK_RECOVERY:
name = "link recovery"; break; case TB_CFG_ERROR_ASYM_LINK:
name = "asymmetric link"; break; default:
name = "unknown"; break;
}
tb_ctl_dbg(ctl, "acking %s (%#x) notification on %llx\n", name,
error->error, route);
/**
 * tb_cfg_ack_plug() - Ack hot plug/unplug event
 * @ctl: Control channel to use
 * @route: Router that originated the event
 * @port: Port where the hot plug/unplug happened
 * @unplug: Ack hot plug or unplug
 *
 * Call this as response for hot plug/unplug event to ack it.
 * Returns %0 on success or an error code on failure.
 */
int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug)
{
	struct cfg_error_pkg pkg = {
		.header = tb_cfg_make_header(route),
		.port = port,
		.error = TB_CFG_ERROR_ACK_PLUG_EVENT,
	};

	/* The PG field tells the router which kind of event we are acking */
	pkg.pg = unplug ? TB_CFG_ERROR_PG_HOT_UNPLUG : TB_CFG_ERROR_PG_HOT_PLUG;

	tb_ctl_dbg(ctl, "acking hot %splug event on %llx:%u\n",
		   unplug ? "un" : "", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}
if (pkg->frame.eof == TB_CFG_PKG_ERROR) returntrue;
if (pkg->frame.eof != req->response_type) returnfalse; if (route != tb_cfg_get_route(req->request)) returnfalse; if (pkg->frame.size != req->response_size) returnfalse;
/* Now make sure it is in expected format */
res = parse_header(pkg, req->response_size, req->response_type,
tb_cfg_get_route(req->request)); if (!res.err)
memcpy(req->response, pkg->buffer, req->response_size);
req->result = res;
/* Always complete when first response is received */ returntrue;
}
/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 * @ctl: Control channel pointer
 * @route: Router string for the router to send reset
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route)
{
	struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) };
	struct tb_cfg_result res = { 0 };
	struct tb_cfg_header reply;
	struct tb_cfg_request *req;

	/*
	 * BUG FIX: @req was passed to tb_cfg_request_sync()/put() without
	 * ever being allocated or initialized (undefined behavior).
	 * Allocate it and describe the request/expected reply first.
	 */
	req = tb_cfg_request_alloc();
	if (!req) {
		res.err = -ENOMEM;
		return res;
	}

	/*
	 * NOTE(review): tb_cfg_match/tb_cfg_copy are the matcher and copy
	 * callbacks used by the other transactions in this file — confirm
	 * the names against the full source.
	 */
	req->match = tb_cfg_match;
	req->copy = tb_cfg_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_RESET;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_RESET;

	res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec);

	tb_cfg_request_put(req);

	return res;
}
/** * tb_cfg_read_raw() - read from config space into buffer * @ctl: Pointer to the control channel * @buffer: Buffer where the data is read * @route: Route string of the router * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise * @space: Config space selector * @offset: Dword word offset of the register to start reading * @length: Number of dwords to read * @timeout_msec: Timeout in ms how long to wait for the response * * Reads from router config space without translating the possible error.
*/ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
u64 route, u32 port, enum tb_cfg_space space,
u32 offset, u32 length, int timeout_msec)
{ struct tb_cfg_result res = { 0 }; struct cfg_read_pkg request = {
.header = tb_cfg_make_header(route),
.addr = {
.port = port,
.space = space,
.offset = offset,
.length = length,
},
}; struct cfg_write_pkg reply; int retries = 0;
while (retries < TB_CTL_RETRIES) { struct tb_cfg_request *req;
/** * tb_cfg_write_raw() - write from buffer into config space * @ctl: Pointer to the control channel * @buffer: Data to write * @route: Route string of the router * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise * @space: Config space selector * @offset: Dword word offset of the register to start writing * @length: Number of dwords to write * @timeout_msec: Timeout in ms how long to wait for the response * * Writes to router config space without translating the possible error.
*/ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, constvoid *buffer,
u64 route, u32 port, enum tb_cfg_space space,
u32 offset, u32 length, int timeout_msec)
{ struct tb_cfg_result res = { 0 }; struct cfg_write_pkg request = {
.header = tb_cfg_make_header(route),
.addr = {
.port = port,
.space = space,
.offset = offset,
.length = length,
},
}; struct cfg_read_pkg reply; int retries = 0;
memcpy(&request.data, buffer, length * 4);
while (retries < TB_CTL_RETRIES) { struct tb_cfg_request *req;
staticint tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space, conststruct tb_cfg_result *res)
{ /* * For unimplemented ports access to port config space may return * TB_CFG_ERROR_INVALID_CONFIG_SPACE (alternatively their type is * set to TB_TYPE_INACTIVE). In the former case return -ENODEV so * that the caller can mark the port as disabled.
*/ if (space == TB_CFG_PORT &&
res->tb_error == TB_CFG_ERROR_INVALID_CONFIG_SPACE) return -ENODEV;
tb_cfg_print_error(ctl, space, res);
if (res->tb_error == TB_CFG_ERROR_LOCK) return -EACCES; if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED) return -ENOTCONN;
/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 * @ctl: Pointer to the control channel
 * @route: Route string of the router
 *
 * Reads the first dword from the switches TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	struct tb_cfg_result res;
	u32 dummy;

	res = tb_cfg_read_raw(ctl, &dummy, route, 0, TB_CFG_SWITCH, 0, 1,
			      ctl->timeout_msec);
	/* err == 1 means the router answered with a TB error packet */
	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;

	return res.response_port;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.