/* * DP MST (DisplayPort Multi-Stream Transport) * * MST support on the source depends on the platform and port. DP initialization * sets up MST for each MST capable encoder. This will become the primary * encoder for the port. * * MST initialization of each primary encoder creates MST stream encoders, one * per pipe, and initializes the MST topology manager. The MST stream encoders * are sometimes called "fake encoders", because they're virtual, not * physical. Thus there are (number of MST capable ports) x (number of pipes) * MST stream encoders in total. * * Decision to use MST for a sink happens at detect on the connector attached to * the primary encoder, and this will not change while the sink is connected. We * always use MST when possible, including for SST sinks with sideband messaging * support. * * The connectors for the MST streams are added and removed dynamically by the * topology manager. Their connection status is also determined by the topology * manager. * * On hardware, each transcoder may be associated with a single DDI * port. Multiple transcoders may be associated with the same DDI port only if * the port is in MST mode. * * On TGL+, all the transcoders streaming on the same DDI port will indicate a * primary transcoder; the TGL_DP_TP_CTL and TGL_DP_TP_STATUS registers are * relevant only on the primary transcoder. Prior to that, they are port * registers.
*/
if (!intel_dp_is_uhbr(crtc_state) || DISPLAY_VER(display) >= 20 || !dsc) return 0;
/* * DSC->DPT interface width: * ICL-MTL: 72 bits (each branch has 72 bits, only left branch is used) * LNL+: 144 bits (not a bottleneck in any config) * * Bspec/49259 suggests that the FEC overhead needs to be * applied here, though HW people claim that neither this FEC * or any other overhead is applicable here (that is the actual * available_bw is just symbol_clock * 72). However based on * testing on MTL-P the * - DELL U3224KBA display * - Unigraf UCD-500 CTS test sink * devices the * - 5120x2880/995.59Mhz * - 6016x3384/1357.23Mhz * - 6144x3456/1413.39Mhz * modes (all the ones having a DPT limit on the above devices), * both the channel coding efficiency and an additional 3% * overhead needs to be accounted for.
*/ return div64_u64(mul_u32_u32(intel_dp_link_symbol_clock(crtc_state->port_clock) * 72,
drm_dp_bw_channel_coding_efficiency(true)),
mul_u32_u32(adjusted_mode->crtc_clock, 1030000));
}
/*
 * Return the BW allocation overhead for the MST stream in units of
 * 1/1000000, accounting for the SSC, FEC and DSC overheads as applicable
 * for the given link/stream configuration.
 */
static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
				    bool ssc, int dsc_slice_count, int bpp_x16)
{
	const struct drm_display_mode *adjusted_mode =
		&crtc_state->hw.adjusted_mode;
	unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
	int overhead;

	/*
	 * NOTE(review): the original text of this function was truncated,
	 * leaving 'overhead' uninitialized (undefined behavior). The flag
	 * setup and the drm_dp_bw_overhead() call below are restored per
	 * the upstream driver - confirm against the intended version.
	 */
	flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
	flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
	flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;

	if (dsc_slice_count)
		flags |= DRM_DP_BW_OVERHEAD_DSC;

	overhead = drm_dp_bw_overhead(crtc_state->lane_count,
				      adjusted_mode->hdisplay,
				      dsc_slice_count,
				      bpp_x16,
				      flags);

	/*
	 * TODO: clarify whether a minimum required by the fixed FEC overhead
	 * in the bspec audio programming sequence is required here.
	 */
	return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
}
staticvoid intel_dp_mst_compute_m_n(conststruct intel_crtc_state *crtc_state, int overhead, int bpp_x16, struct intel_link_m_n *m_n)
{ conststruct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
/* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
adjusted_mode->crtc_clock,
crtc_state->port_clock,
overhead,
m_n);
/*
 * Convert the stream's effective data rate - derived from @pixel_clock,
 * @bpp_x16 and the @bw_overhead factor - to the PBN (Payload Bandwidth
 * Number) granularity used by the MST topology manager (1 PBN = 54/64 MBps,
 * hence the * 64 / 54 scaling; the / 1000 converts kbps to MBps).
 */
static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
{
	int effective_data_rate =
		intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);

	/*
	 * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
	 * to calculate PBN with the BW overhead passed to it.
	 */
	return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
}
if (!bpp_step_x16) { /* Allow using zero step only to indicate single try for a given bpp. */
drm_WARN_ON(display->drm, min_bpp_x16 != max_bpp_x16);
bpp_step_x16 = 1;
}
if (is_mst) {
mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst.mgr); if (IS_ERR(mst_state)) return PTR_ERR(mst_state);
drm_dbg_kms(display->drm, "Looking for slots in range min bpp " FXP_Q4_FMT " max bpp " FXP_Q4_FMT "\n",
FXP_Q4_ARGS(min_bpp_x16), FXP_Q4_ARGS(max_bpp_x16));
if (dsc) {
dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state); if (!dsc_slice_count) {
drm_dbg_kms(display->drm, "Can't get valid DSC slice count\n");
if (dsc && !intel_dp_dsc_valid_compressed_bpp(intel_dp, bpp_x16)) { /* SST must have validated the single bpp tried here already earlier. */
drm_WARN_ON(display->drm, !is_mst); continue;
}
/* * The TU size programmed to the HW determines which slots in * an MTP frame are used for this stream, which needs to match * the payload size programmed to the first downstream branch * device's payload table. * * Note that atm the payload's PBN value DRM core sends via * the ALLOCATE_PAYLOAD side-band message matches the payload * size (which it calculates from the PBN value) it programs * to the first branch device's payload table. The allocation * in the payload table could be reduced though (to * crtc_state->dp_m_n.tu), provided that the driver doesn't * enable SSC on the corresponding link.
*/
pbn.full = dfixed_const(intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
link_bpp_x16,
remote_bw_overhead));
remote_tu = DIV_ROUND_UP(pbn.full, mst_state->pbn_div.full);
/* * Aligning the TUs ensures that symbols consisting of multiple * (4) symbol cycles don't get split between two consecutive * MTPs, as required by Bspec. * TODO: remove the alignment restriction for 128b/132b links * on some platforms, where Bspec allows this.
*/
remote_tu = ALIGN(remote_tu, 4 / crtc_state->lane_count);
/* * Also align PBNs accordingly, since MST core will derive its * own copy of TU from the PBN in drm_dp_atomic_find_time_slots(). * The above comment about the difference between the PBN * allocated for the whole path and the TUs allocated for the * first branch device's link also applies here.
*/
pbn.full = remote_tu * mst_state->pbn_div.full;
/* * FIXME: allocate the BW according to link_bpp, which in the case of * YUV420 is only half of the pipe bpp value.
*/ return intel_dp_mtp_tu_compute_config(intel_dp, crtc_state, conn_state,
limits->link.min_bpp_x16,
limits->link.max_bpp_x16,
fxp_q4_from_int(2 * 3), false);
}
staticint mst_stream_dsc_compute_link_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state, conststruct link_config_limits *limits)
{ struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = to_intel_connector(conn_state->connector); int num_bpc;
u8 dsc_bpc[3] = {}; int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp; int min_compressed_bpp_x16, max_compressed_bpp_x16; int bpp_step_x16;
if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state, limits)) returntrue;
if (!dsc) { if (intel_dp_supports_dsc(intel_dp, connector, crtc_state)) {
drm_dbg_kms(display->drm, "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name); returnfalse;
}
drm_dbg_kms(display->drm, "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name);
if (limits->link.max_bpp_x16 < fxp_q4_from_int(24)) returnfalse;
if (limits->link.min_bpp_x16 >= min_bpp_x16) returntrue;
drm_dbg_kms(display->drm, "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " FXP_Q4_FMT " in DSC mode due to hblank expansion quirk\n",
crtc->base.base.id, crtc->base.name,
connector->base.base.id, connector->base.name,
FXP_Q4_ARGS(min_bpp_x16));
if (limits->link.max_bpp_x16 < min_bpp_x16) returnfalse;
if (!dsc_needed) {
ret = mst_stream_compute_link_config(intel_dp, pipe_config,
conn_state, &limits);
if (ret == -EDEADLK) return ret;
if (ret)
dsc_needed = true;
}
if (dsc_needed && !intel_dp_supports_dsc(intel_dp, connector, pipe_config)) {
drm_dbg_kms(display->drm, "DSC required but not available\n"); return -EINVAL;
}
/* enable compression if the mode doesn't fit available BW */ if (dsc_needed) {
drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
if (!mst_stream_compute_config_limits(intel_dp, connector,
pipe_config, true,
&limits)) return -EINVAL;
/* * FIXME: As bpc is hardcoded to 8, as mentioned above, * WARN and ignore the debug flag force_dsc_bpc for now.
*/
drm_WARN(display->drm, intel_dp->force_dsc_bpc, "Cannot Force BPC for MST\n"); /* * Try to get at least some timeslots and then see, if * we can fit there with DSC.
*/
drm_dbg_kms(display->drm, "Trying to find VCPI slots in DSC mode\n");
ret = mst_stream_dsc_compute_link_config(intel_dp, pipe_config,
conn_state, &limits); if (ret < 0) return ret;
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits,
pipe_config->dp_m_n.tu);
}
/* * Iterate over all connectors and return a mask of * all CPU transcoders streaming over the same DP link.
*/ staticunsignedint
intel_dp_mst_transcoder_mask(struct intel_atomic_state *state, struct intel_dp *mst_port)
{ struct intel_display *display = to_intel_display(state); conststruct intel_digital_connector_state *conn_state; struct intel_connector *connector;
u8 transcoders = 0; int i;
ret = intel_link_bw_reduce_bpp(state, limits,
mst_port_pipes, "MST link BW");
return ret ? : -EAGAIN;
}
/**
 * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
 * @state: intel atomic state
 * @limits: link BW limits
 *
 * Check the link configuration for all modeset MST outputs. If the
 * configuration is invalid @limits will be updated if possible to
 * reduce the total BW, after which the configuration for all CRTCs in
 * @state must be recomputed with the updated @limits.
 *
 * Returns:
 *   - 0 if the configuration is valid
 *   - %-EAGAIN, if the configuration is invalid and @limits got updated
 *     with fallback values with which the configuration of all CRTCs in
 *     @state must be recomputed
 *   - Other negative error, if the configuration is invalid without a
 *     fallback possibility, or the check failed for another reason
 */
int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
				   struct intel_link_bw_limits *limits)
{
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_topology_state *mst_state;
	int ret;
	int i;

	/*
	 * NOTE(review): the original text interleaved statements referencing
	 * 'crtc_state'/'intel_dp' (undeclared here; they belong to the stream
	 * compute-config path) into this loop; they have been removed.
	 */
	for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
		ret = intel_dp_mst_check_fec_change(state, mgr, limits);
		if (ret)
			return ret;

		ret = intel_dp_mst_check_bw(state, mgr, mst_state, limits);
		if (ret)
			return ret;
	}

	return 0;
}
/* * If one of the connectors in a MST stream needs a modeset, mark all CRTCs * that shares the same MST stream as mode changed, * intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do * a fastset when possible. * * On TGL+ this is required since each stream go through a master transcoder, * so if the master transcoder needs modeset, all other streams in the * topology need a modeset. All platforms need to add the atomic state * for all streams in the topology, since a modeset on one may require * changing the MST link BW usage of the others, which in turn needs a * recomputation of the corresponding CRTC states.
*/ staticint
mst_connector_atomic_topology_check(struct intel_connector *connector, struct intel_atomic_state *state)
{ struct intel_display *display = to_intel_display(connector); struct drm_connector_list_iter connector_list_iter; struct intel_connector *connector_iter; int ret = 0;
if (!intel_connector_needs_modeset(state, &connector->base)) return 0;
ret = intel_digital_connector_atomic_check(&connector->base, &state->base); if (ret) return ret;
ret = mst_connector_atomic_topology_check(connector, state); if (ret) return ret;
if (intel_connector_needs_modeset(state, &connector->base)) {
ret = intel_dp_tunnel_atomic_check_state(state,
connector->mst.dp,
connector); if (ret) return ret;
}
if (DISPLAY_VER(display) >= 9)
skl_scaler_disable(old_pipe_crtc_state); else
ilk_pfit_disable(old_pipe_crtc_state);
}
/* * Power down mst path before disabling the port, otherwise we end * up getting interrupts from the sink upon detecting link loss.
*/
drm_dp_send_power_updown_phy(&intel_dp->mst.mgr, connector->mst.port, false);
/* * BSpec 4287: disable DIP after the transcoder is disabled and before * the transcoder clock select is set to none.
*/
intel_dp_set_infoframes(primary_encoder, false, old_crtc_state, NULL); /* * From TGL spec: "If multi-stream slave transcoder: Configure * Transcoder Clock Select to direct no clock to the transcoder" * * From older GENs spec: "Configure Transcoder Clock Select to direct * no clock to the transcoder"
*/ if (DISPLAY_VER(display) < 12 || !last_mst_stream)
intel_ddi_disable_transcoder_clock(old_crtc_state);
intel_mst->connector = NULL; if (last_mst_stream)
primary_encoder->post_disable(state, primary_encoder,
old_crtc_state, NULL);
if (intel_dp_mst_active_streams(intel_dp) == 0)
primary_encoder->pre_pll_enable(state, primary_encoder,
pipe_config, NULL); else /* * The port PLL state needs to get updated for secondary * streams as for the primary stream.
*/
intel_ddi_update_active_dpll(state, primary_encoder,
to_intel_crtc(pipe_config->uapi.crtc));
}
staticbool intel_mst_probed_link_params_valid(struct intel_dp *intel_dp, int link_rate, int lane_count)
{ return intel_dp->link.mst_probed_rate == link_rate &&
intel_dp->link.mst_probed_lane_count == lane_count;
}
/*
 * Record the link parameters the MST topology is being probed with, for
 * later comparison via intel_mst_probed_link_params_valid().
 */
static void intel_mst_set_probed_link_params(struct intel_dp *intel_dp,
					     int link_rate, int lane_count)
{
	intel_dp->link.mst_probed_rate = link_rate;
	intel_dp->link.mst_probed_lane_count = lane_count;
}
/* MST encoders are bound to a crtc, not to a connector, * force the mapping here for get_hw_state.
*/
connector->encoder = encoder;
intel_mst->connector = connector;
ret = drm_dp_add_payload_part1(&intel_dp->mst.mgr, mst_state,
drm_atomic_get_mst_payload_state(mst_state, connector->mst.port)); if (ret < 0)
intel_dp_queue_modeset_retry_for_link(state, primary_encoder, pipe_config);
/* * Before Gen 12 this is not done as part of * primary_encoder->pre_enable() and should be done here. For * Gen 12+ the step in which this should be done is different for the * first MST stream, so it's done on the DDI for the first stream and * here for the following ones.
*/ if (DISPLAY_VER(display) < 12 || !first_mst_stream)
intel_ddi_enable_transcoder_clock(encoder, pipe_config);
if (DISPLAY_VER(display) >= 13 && !first_mst_stream)
intel_ddi_config_transcoder_func(encoder, pipe_config);
/* * TODO: * - Also check if compression would allow for the mode * - Calculate the overhead using drm_dp_bw_overhead() / * drm_dp_bw_channel_coding_efficiency(), similarly to the * compute config code, as drm_dp_calc_pbn_mode() doesn't * account with all the overheads. * - Check here and during compute config the BW reported by * DFP_Link_Available_Payload_Bandwidth_Number (or the * corresponding link capabilities of the sink) in case the * stream is uncompressed for it by the last branch device.
*/
num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
mode->hdisplay, target_clock);
max_dotclk *= num_joined_pipes;
ret = drm_modeset_lock(&mgr->base.lock, ctx); if (ret) return ret;
if (intel_dp_has_dsc(connector)) { /* * TBD pass the connector BPC, * for now U8_MAX so that max BPC on that platform would be picked
*/ int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
staticbool mst_connector_get_hw_state(struct intel_connector *connector)
{ /* This is the MST stream encoder set in ->pre_enable, if any */ struct intel_encoder *encoder = intel_attached_encoder(connector); enum pipe pipe;
if (!encoder || !connector->base.state->crtc) returnfalse;
/* * Reuse the prop from the SST connector because we're * not allowed to create new props after device registration.
*/
connector->base.max_bpc_property =
intel_dp->attached_connector->base.max_bpc_property; if (connector->base.max_bpc_property)
drm_connector_attach_max_bpc_property(&connector->base, 6, 12);
/* * A logical port's OUI (at least for affected sinks) is all 0, so * instead of that the parent port's OUI is used for identification.
*/ if (drm_dp_mst_port_is_logical(connector->mst.port)) {
aux = drm_dp_mst_aux_for_parent(connector->mst.port); if (!aux)
aux = &connector->mst.dp->aux;
}
if (drm_dp_read_dpcd_caps(aux, dpcd) < 0) returnfalse;
if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0) returnfalse;
if (!drm_dp_has_quirk(&desc,
DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC)) returnfalse;
/* * UHBR (MST sink) devices requiring this quirk don't advertise the * HBLANK expansion support. Presuming that they perform HBLANK * expansion internally, or are affected by this issue on modes with a * short HBLANK for other reasons.
*/ if (!drm_dp_128b132b_supported(dpcd) &&
!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE)) returnfalse;
encoder->type = INTEL_OUTPUT_DP_MST;
encoder->power_domain = primary_encoder->power_domain;
encoder->port = primary_encoder->port;
encoder->cloneable = 0; /* * This is wrong, but broken userspace uses the intersection * of possible_crtcs of all the encoders of a given connector * to figure out which crtcs can drive said connector. What * should be used instead is the union of possible_crtcs. * To keep such userspace functioning we must misconfigure * this to make sure the intersection is not empty :(
*/
encoder->pipe_mask = ~0;
int
intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
{ struct intel_display *display = to_intel_display(dig_port); struct intel_dp *intel_dp = &dig_port->dp; enum port port = dig_port->base.port; int ret;
if (!HAS_DP_MST(display) || intel_dp_is_edp(intel_dp)) return 0;
if (DISPLAY_VER(display) < 12 && port == PORT_A) return 0;
if (DISPLAY_VER(display) < 11 && port == PORT_E) return 0;
/** * intel_dp_mst_add_topology_state_for_connector - add MST topology state for a connector * @state: atomic state * @connector: connector to add the state for * @crtc: the CRTC @connector is attached to * * Add the MST topology state for @connector to @state. * * Returns 0 on success, negative error code on failure.
*/ staticint
intel_dp_mst_add_topology_state_for_connector(struct intel_atomic_state *state, struct intel_connector *connector, struct intel_crtc *crtc)
{ struct drm_dp_mst_topology_state *mst_state;
if (!connector->mst.dp) return 0;
mst_state = drm_atomic_get_mst_topology_state(&state->base,
&connector->mst.dp->mst.mgr); if (IS_ERR(mst_state)) return PTR_ERR(mst_state);
/** * intel_dp_mst_add_topology_state_for_crtc - add MST topology state for a CRTC * @state: atomic state * @crtc: CRTC to add the state for * * Add the MST topology state for @crtc to @state. * * Returns 0 on success, negative error code on failure.
*/ int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state, struct intel_crtc *crtc)
{ struct drm_connector *_connector; struct drm_connector_state *conn_state; int i;
/** * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC * @state: atomic state * @crtc: CRTC for which to check the modeset requirement * * Check if any change in a MST topology requires a forced modeset on @crtc in * this topology. One such change is enabling/disabling the DSC decompression * state in the first branch device's UFP DPCD as required by one CRTC, while * the other @crtc in the same topology is still active, requiring a full modeset * on @crtc.
*/ bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state, struct intel_crtc *crtc)
{ conststruct intel_connector *crtc_connector; conststruct drm_connector_state *conn_state; conststruct drm_connector *_connector; int i;
if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
INTEL_OUTPUT_DP_MST)) returnfalse;
/*
 * NOTE(review): stray text from the hosting webpage was appended to this
 * file; preserved here as a comment (translated from German) so the file
 * remains syntactically coherent:
 *
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Remark: the colored syntax display and the measurement are still
 * experimental."
 */