/* * The following helpers provide a way to read out the tunneling DPCD * registers with a minimal amount of AUX transfers (1 transfer per contiguous * range, as permitted by the 16 byte per transfer AUX limit), not accessing * other registers to avoid any read side-effects.
*/ staticint next_reg_area(int *offset)
{
*offset = find_next_bit(dptun_info_regs, 64, *offset);
/* Return granularity in kB/s units */ staticint tunnel_reg_bw_granularity(conststruct drm_dp_tunnel_regs *regs)
{ int gr = tunnel_reg(regs, DP_BW_GRANULARITY) & DP_BW_GRANULARITY_MASK;
for (i = 0; i < mgr->group_count; i++) { /* * A tunnel group with 0 group ID shouldn't have more than one * tunnels.
*/ if (tunnel_group_id(drv_group_id) &&
mgr->groups[i].drv_group_id == drv_group_id) return &mgr->groups[i];
if (!group && !mgr->groups[i].active)
group = &mgr->groups[i];
}
if (!group) {
drm_dbg_kms(mgr->dev, "DPTUN: Can't allocate more tunnel groups\n"); return NULL;
}
/* * The group name format here and elsewhere: Driver-ID:Group-ID:* * (* standing for all DP-Adapters/tunnels in the group).
*/
snprintf(group->name, sizeof(group->name), "%d:%d:*",
tunnel_group_drv_id(drv_group_id) & ((1 << DP_GROUP_ID_BITS) - 1),
tunnel_group_id(drv_group_id) & ((1 << DP_USB4_DRIVER_ID_BITS) - 1));
/** * drm_dp_tunnel_get - Get a reference for a DP tunnel * @tunnel: Tunnel object * @tracker: Debug tracker for the reference * * Get a reference for @tunnel, along with a debug tracker to help locating * the source of a reference leak/double reference put etc. issue. * * The reference must be dropped after use calling drm_dp_tunnel_put() * passing @tunnel and *@tracker returned from here. * * Returns @tunnel - as a convenience - along with *@tracker.
*/ struct drm_dp_tunnel *
drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker)
{
track_tunnel_ref(tunnel, tracker);
/** * drm_dp_tunnel_put - Put a reference for a DP tunnel * @tunnel: Tunnel object * @tracker: Debug tracker for the reference * * Put a reference for @tunnel along with its debug *@tracker, which * was obtained with drm_dp_tunnel_get().
*/ void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker)
{
untrack_tunnel_ref(tunnel, tracker);
tunnel->bw_granularity = tunnel_reg_bw_granularity(regs);
tunnel->allocated_bw = tunnel_reg(regs, DP_ALLOCATED_BW) *
tunnel->bw_granularity; /* * An initial allocated BW of 0 indicates an undefined state: the * actual allocation is determined by the TBT CM, usually following a * legacy allocation policy (based on the max DPRX caps). From the * driver's POV the state becomes defined only after the first * allocation request.
*/ if (!tunnel->allocated_bw)
tunnel->allocated_bw = -1;
/**
 * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
 * @tunnel: Tunnel object
 *
 * Set the IO error flag for @tunnel. Drivers can call this function upon
 * detecting a failure that affects the tunnel functionality, for instance
 * after a DP AUX transfer failure on the port @tunnel is connected to.
 *
 * This disables further management of @tunnel, including any related
 * AUX accesses for tunneling DPCD registers, returning error to the
 * initiators of these. The driver is supposed to drop this tunnel and -
 * optionally - recreate it.
 */
void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
{
	tunnel->has_io_error = true;
}
EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);
if (!tunnel_reg_bw_alloc_supported(regs)) { if (tunnel_group_id(drv_group_id)) {
drm_dbg_kms(mgr->dev, "DPTUN: A non-zero group ID is only allowed with BWA support\n");
ret = false;
}
if (tunnel_reg(regs, DP_ALLOCATED_BW)) {
drm_dbg_kms(mgr->dev, "DPTUN: BW is allocated without BWA support\n");
ret = false;
}
return ret;
}
if (!tunnel_group_id(drv_group_id)) {
drm_dbg_kms(mgr->dev, "DPTUN: BWA support requires a non-zero group ID\n");
ret = false;
}
if (check_dprx && hweight8(tunnel_reg_max_dprx_lane_count(regs)) != 1) {
drm_dbg_kms(mgr->dev, "DPTUN: Invalid DPRX lane count: %d\n",
tunnel_reg_max_dprx_lane_count(regs));
ret = false;
}
if (check_dprx && !tunnel_reg_max_dprx_rate(regs)) {
drm_dbg_kms(mgr->dev, "DPTUN: DPRX rate is 0\n");
ret = false;
}
if (tunnel_reg_bw_granularity(regs) < 0) {
drm_dbg_kms(mgr->dev, "DPTUN: Invalid BW granularity\n");
/* Return the length of @dev_id, ignoring any trailing NUL padding bytes. */
static int dev_id_len(const u8 *dev_id, int max_len)
{
	while (max_len && dev_id[max_len - 1] == '\0')
		max_len--;

	return max_len;
}
staticint get_max_dprx_bw(conststruct drm_dp_tunnel *tunnel)
{ int max_dprx_bw = drm_dp_max_dprx_data_rate(tunnel->max_dprx_rate,
tunnel->max_dprx_lane_count);
/* * A BW request of roundup(max_dprx_bw, tunnel->bw_granularity) results in * an allocation of max_dprx_bw. A BW request above this rounded-up * value will fail.
*/ return min(roundup(max_dprx_bw, tunnel->bw_granularity),
MAX_DP_REQUEST_BW * tunnel->bw_granularity);
}
/** * drm_dp_tunnel_detect - Detect DP tunnel on the link * @mgr: Tunnel manager * @aux: DP AUX on which the tunnel will be detected * * Detect if there is any DP tunnel on the link and add it to the tunnel * group's tunnel list. * * Returns a pointer to a tunnel on success, or an ERR_PTR() error on * failure.
*/ struct drm_dp_tunnel *
drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
{ struct drm_dp_tunnel_regs regs; struct drm_dp_tunnel *tunnel; int err;
err = read_tunnel_regs(aux, ®s); if (err) return ERR_PTR(err);
if (!(tunnel_reg(®s, DP_TUNNELING_CAPABILITIES) &
DP_TUNNELING_SUPPORT)) return ERR_PTR(-ENODEV);
/* The DPRX caps are valid only after enabling BW alloc mode. */ if (!tunnel_regs_are_valid(mgr, ®s, SKIP_DPRX_CAPS_CHECK)) return ERR_PTR(-EINVAL);
tunnel = create_tunnel(mgr, aux, ®s); if (!tunnel) return ERR_PTR(-ENOMEM);
/**
 * drm_dp_tunnel_destroy - Destroy tunnel object
 * @tunnel: Tunnel object
 *
 * Remove the tunnel from the tunnel topology and destroy it.
 *
 * Returns 0 on success, -ENODEV if the tunnel has been destroyed already.
 */
int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
{
	/* Destroying a NULL tunnel is a no-op. */
	if (!tunnel)
		return 0;

	/* Guard against double-destroy. */
	if (drm_WARN_ON(tunnel->group->mgr->dev, tunnel->destroyed))
		return -ENODEV;

	tun_dbg(tunnel, "destroying\n");

	tunnel->destroyed = true;
	destroy_tunnel(tunnel);

	return 0;
}
EXPORT_SYMBOL(drm_dp_tunnel_destroy);
staticint check_tunnel(conststruct drm_dp_tunnel *tunnel)
{ if (tunnel->destroyed) return -ENODEV;
/* * The estimated BW reported by the TBT Connection Manager for each tunnel in * a group includes the BW already allocated for the given tunnel and the * unallocated BW which is free to be used by any tunnel in the group.
*/ staticint group_free_bw(conststruct drm_dp_tunnel *tunnel)
{ return tunnel->estimated_bw - tunnel_allocated_bw(tunnel);
}
if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0) goto out_err;
if (enable)
val |= mask; else
val &= ~mask;
if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0) goto out_err;
tunnel->bw_alloc_enabled = enable;
return 0;
out_err:
drm_dp_tunnel_set_io_error(tunnel);
return -EIO;
}
/** * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode * @tunnel: Tunnel object * * Enable the DP tunnel BW allocation mode on @tunnel if it supports it. * * Returns 0 in case of success, negative error code otherwise.
*/ int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
{ struct drm_dp_tunnel_regs regs; int err;
err = check_tunnel(tunnel); if (err) return err;
if (!tunnel->bw_alloc_supported) return -EOPNOTSUPP;
if (!tunnel_group_id(tunnel->group->drv_group_id)) return -EINVAL;
err = set_bw_alloc_mode(tunnel, true); if (err) goto out;
/* * After a BWA disable/re-enable sequence the allocated BW can either * stay at its last requested value or, for instance after system * suspend/resume, TBT CM can reset back the allocation to the amount * allocated in the legacy/non-BWA mode. Accordingly allow for the * allocation to change wrt. the last SW state.
*/
err = read_and_verify_tunnel_regs(tunnel, ®s,
ALLOW_ALLOCATED_BW_CHANGE); if (err) {
set_bw_alloc_mode(tunnel, false);
goto out;
}
if (!tunnel->max_dprx_rate)
update_dprx_caps(tunnel, ®s);
out:
tun_dbg_stat(tunnel, err, "Allocating %d/%d Mb/s for tunnel: Group alloc:%d/%d Mb/s",
DPTUN_BW_ARG(request_bw * tunnel->bw_granularity),
DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
DPTUN_BW_ARG(tunnel->group->available_bw));
if (err == -EIO)
drm_dp_tunnel_set_io_error(tunnel);
return err;
}
/** * drm_dp_tunnel_alloc_bw - Allocate BW for a DP tunnel * @tunnel: Tunnel object * @bw: BW in kB/s units * * Allocate @bw kB/s for @tunnel. The allocated BW must be freed after use by * calling this function for the same tunnel setting @bw to 0. * * Returns 0 in case of success, a negative error code otherwise.
*/ int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
{ int err;
/**
 * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
 * @tunnel: Tunnel object
 *
 * Get the current BW allocated for @tunnel. After the tunnel is created /
 * resumed and the BW allocation mode is enabled for it, the allocation
 * becomes determined only after the first allocation request by the driver
 * calling drm_dp_tunnel_alloc_bw().
 *
 * Return the BW allocated for the tunnel, or -1 if the allocation is
 * undetermined.
 */
int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
{
	return tunnel->allocated_bw;
}
EXPORT_SYMBOL(drm_dp_tunnel_get_allocated_bw);
/*
 * Return 0 if the status hasn't changed, 1 if the status has changed, a
 * negative error code in case of an I/O failure.
 */
static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
{
	u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED;
	u8 val;

	if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0)
		goto out_err;

	val &= mask;

	if (val) {
		/* Acknowledge the change by writing back the set bits. */
		if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0)
			goto out_err;

		return 1;
	}

	if (!drm_dp_tunnel_bw_alloc_is_enabled(tunnel))
		return 0;

	/*
	 * Check for estimated BW changes explicitly to account for lost
	 * BW change notifications.
	 */
	if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0)
		goto out_err;

	if (val * tunnel->bw_granularity != tunnel->estimated_bw)
		return 1;

	return 0;

out_err:
	drm_dp_tunnel_set_io_error(tunnel);

	return -EIO;
}
/** * drm_dp_tunnel_update_state - Update DP tunnel SW state with the HW state * @tunnel: Tunnel object * * Update the SW state of @tunnel with the HW state. * * Returns 0 if the state has not changed, 1 if it has changed and got updated * successfully and a negative error code otherwise.
*/ int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
{ struct drm_dp_tunnel_regs regs; bool changed = false; int ret;
ret = check_tunnel(tunnel); if (ret < 0) return ret;
ret = check_and_clear_status_change(tunnel); if (ret < 0) goto out;
if (!ret) return 0;
ret = read_and_verify_tunnel_regs(tunnel, ®s, 0); if (ret) goto out;
if (update_dprx_caps(tunnel, ®s))
changed = true;
ret = update_group_available_bw(tunnel, ®s); if (ret == 1)
changed = true;
out:
tun_dbg_stat(tunnel, ret < 0 ? ret : 0, "State update: Changed:%s DPRX:%dx%d Tunnel alloc:%d/%d Group alloc:%d/%d Mb/s",
str_yes_no(changed),
tunnel->max_dprx_rate / 100, tunnel->max_dprx_lane_count,
DPTUN_BW_ARG(tunnel->allocated_bw),
DPTUN_BW_ARG(get_max_tunnel_bw(tunnel)),
DPTUN_BW_ARG(group_allocated_bw(tunnel->group)),
DPTUN_BW_ARG(tunnel->group->available_bw));
/* * drm_dp_tunnel_handle_irq - Handle DP tunnel IRQs * * Handle any pending DP tunnel IRQs, waking up waiters for a completion * event. * * Returns 1 if the state of the tunnel has changed which requires calling * drm_dp_tunnel_update_state(), a negative error code in case of a failure, * 0 otherwise.
*/ int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
{
u8 val;
if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0) return -EIO;
if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED))
wake_up_all(&mgr->bw_req_queue);
if (val & (DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED)) return 1;
/** * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX * @tunnel: Tunnel object * * The function is used to query the maximum link rate of the DPRX connected * to @tunnel. Note that this rate will not be limited by the BW limit of the * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD * registers. * * Returns the maximum link rate in 10 kbit/s units.
*/ int drm_dp_tunnel_max_dprx_rate(conststruct drm_dp_tunnel *tunnel)
{ return tunnel->max_dprx_rate;
}
EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate);
/** * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX * @tunnel: Tunnel object * * The function is used to query the maximum lane count of the DPRX connected * to @tunnel. Note that this lane count will not be limited by the BW limit of * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD * registers. * * Returns the maximum lane count.
*/ int drm_dp_tunnel_max_dprx_lane_count(conststruct drm_dp_tunnel *tunnel)
{ return tunnel->max_dprx_lane_count;
}
EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count);
/** * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel * @tunnel: Tunnel object * * This function is used to query the estimated total available BW of the * tunnel. This includes the currently allocated and free BW for all the * tunnels in @tunnel's group. The available BW is valid only after the BW * allocation mode has been enabled for the tunnel and its state got updated * calling drm_dp_tunnel_update_state(). * * Returns the @tunnel group's estimated total available bandwidth in kB/s * units, or -1 if the available BW isn't valid (the BW allocation mode is * not enabled or the tunnel's state hasn't been updated).
*/ int drm_dp_tunnel_available_bw(conststruct drm_dp_tunnel *tunnel)
{ return tunnel->group->available_bw;
}
EXPORT_SYMBOL(drm_dp_tunnel_available_bw);
/** * drm_dp_tunnel_atomic_get_state - get/allocate the new atomic state for a tunnel * @state: Atomic state * @tunnel: Tunnel to get the state for * * Get the new atomic state for @tunnel, duplicating it from the old tunnel * state if not yet allocated. * * Return the state or an ERR_PTR() error on failure.
*/ struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state, struct drm_dp_tunnel *tunnel)
{ struct drm_dp_tunnel_group_state *group_state; struct drm_dp_tunnel_state *tunnel_state;
group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel); if (IS_ERR(group_state)) return ERR_CAST(group_state);
tunnel_state = get_or_add_tunnel_state(group_state, tunnel); if (!tunnel_state) return ERR_PTR(-ENOMEM);
/** * drm_dp_tunnel_atomic_get_old_state - get the old atomic state for a tunnel * @state: Atomic state * @tunnel: Tunnel to get the state for * * Get the old atomic state for @tunnel. * * Return the old state or NULL if the tunnel's atomic state is not in @state.
*/ struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state, conststruct drm_dp_tunnel *tunnel)
{ struct drm_dp_tunnel_group_state *old_group_state; int i;
for_each_old_group_in_state(state, old_group_state, i) if (to_group(old_group_state->base.obj) == tunnel->group) return get_tunnel_state(old_group_state, tunnel);
/** * drm_dp_tunnel_atomic_get_new_state - get the new atomic state for a tunnel * @state: Atomic state * @tunnel: Tunnel to get the state for * * Get the new atomic state for @tunnel. * * Return the new state or NULL if the tunnel's atomic state is not in @state.
*/ struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state, conststruct drm_dp_tunnel *tunnel)
{ struct drm_dp_tunnel_group_state *new_group_state; int i;
for_each_new_group_in_state(state, new_group_state, i) if (to_group(new_group_state->base.obj) == tunnel->group) return get_tunnel_state(new_group_state, tunnel);
/** * drm_dp_tunnel_atomic_set_stream_bw - Set the BW for a DP tunnel stream * @state: Atomic state * @tunnel: DP tunnel containing the stream * @stream_id: Stream ID * @bw: BW of the stream * * Set a DP tunnel stream's required BW in the atomic state. * * Returns 0 in case of success, a negative error code otherwise.
*/ int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state, struct drm_dp_tunnel *tunnel,
u8 stream_id, int bw)
{ struct drm_dp_tunnel_group_state *new_group_state; struct drm_dp_tunnel_state *tunnel_state; int err;
if (drm_WARN_ON(tunnel->group->mgr->dev,
stream_id > BITS_PER_TYPE(tunnel_state->stream_mask))) return -EINVAL;
tun_dbg(tunnel, "Setting %d Mb/s for stream %d\n",
DPTUN_BW_ARG(bw), stream_id);
new_group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel); if (IS_ERR(new_group_state)) return PTR_ERR(new_group_state);
if (bw == 0) {
tunnel_state = get_tunnel_state(new_group_state, tunnel); if (!tunnel_state) return 0;
/** * drm_dp_tunnel_atomic_get_required_bw - Get the BW required by a DP tunnel * @tunnel_state: Atomic state of the queried tunnel * * Calculate the BW required by a tunnel adding up the required BW of all * the streams in the tunnel. * * Return the total BW required by the tunnel.
*/ int drm_dp_tunnel_atomic_get_required_bw(conststruct drm_dp_tunnel_state *tunnel_state)
{ int tunnel_bw = 0; int i;
if (!tunnel_state || !tunnel_state->stream_mask) return 0;
for (i = 0; i < hweight32(tunnel_state->stream_mask); i++)
tunnel_bw += tunnel_state->stream_bw[i];
/** * drm_dp_tunnel_atomic_get_group_streams_in_state - Get mask of stream IDs in a group * @state: Atomic state * @tunnel: Tunnel object * @stream_mask: Mask of streams in @tunnel's group * * Get the mask of all the stream IDs in the tunnel group of @tunnel. * * Return 0 in case of success - with the stream IDs in @stream_mask - or a * negative error code in case of failure.
*/ int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state, conststruct drm_dp_tunnel *tunnel,
u32 *stream_mask)
{ struct drm_dp_tunnel_group_state *group_state; struct drm_dp_tunnel_state *tunnel_state;
group_state = drm_dp_tunnel_atomic_get_group_state(state, tunnel); if (IS_ERR(group_state)) return PTR_ERR(group_state);
/** * drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state * @state: Atomic state * @failed_stream_mask: Mask of stream IDs with a BW limit failure * * Check the required BW of each DP tunnel in @state against both the DPRX BW * limit of the tunnel and the BW limit of the tunnel group. Return a mask of * stream IDs in @failed_stream_mask once a check fails. The mask will contain * either all the streams in a tunnel (in case a DPRX BW limit check failed) or * all the streams in a tunnel group (in case a group BW limit check failed). * * Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit * check failed - with @failed_stream_mask containing the streams failing the * check - or a negative error code otherwise.
*/ int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
u32 *failed_stream_mask)
{ struct drm_dp_tunnel_group_state *new_group_state; int i;
for_each_new_group_in_state(state, new_group_state, i) { int ret;
ret = drm_dp_tunnel_atomic_check_group_bw(new_group_state,
failed_stream_mask); if (ret) return ret;
}
/** * drm_dp_tunnel_mgr_create - Create a DP tunnel manager * @dev: DRM device object * @max_group_count: Maximum number of tunnel groups * * Creates a DP tunnel manager for @dev. * * Returns a pointer to the tunnel manager if created successfully or error * pointer in case of failure.
*/ struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
{ struct drm_dp_tunnel_mgr *mgr; int i;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); if (!mgr) return ERR_PTR(-ENOMEM);
/*
 * NOTE(review): The text below is an unrelated extraction artifact (a German
 * website disclaimer: "The information on this website was carefully compiled
 * to the best of our knowledge; however neither completeness, correctness nor
 * quality of the provided information is guaranteed. Note: the syntax
 * highlighting and the measurement are still experimental."). It is not part
 * of this source file and should be removed; it is commented out here so it
 * does not break compilation.
 */