/*
 * Contains a bit mask, used to determine whether the
 * corresponding pipe allows SAGV or not.
 */
u8 pipe_sagv_reject;
/* bitmask of active pipes */
u8 active_pipes;
/*
 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
 * the selected QGV point as the parameter in multiples of 100MB/s.
 */
u16 qgv_point_peakbw;
/*
 * Current QGV points mask, which restricts
 * some particular SAGV states; not to be confused
 * with pipe_sagv_mask.
 */
u16 qgv_points_mask;
/*
 * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
 * it with failure if we try masking any unadvertised points.
 * So we need to operate only with those returned from PCode.
*/ if (num_qgv_points > 0)
qgv_points = GENMASK(num_qgv_points - 1, 0);
if (num_psf_gv_points > 0)
psf_points = GENMASK(num_psf_gv_points - 1, 0);
/*
 * Ask PCode to mask out (restrict) the QGV points given in @points_mask.
 *
 * Returns 0 on success (or when the request does not apply), or a negative
 * error code if PCode rejected the request.
 */
static int icl_pcode_restrict_qgv_points(struct intel_display *display,
					 u32 points_mask)
{
	int ret;

	/* From display version 14 onwards SAGV is handled elsewhere (pmdemand). */
	if (DISPLAY_VER(display) >= 14)
		return 0;

	/* bspec says to keep retrying for at least 1 ms */
	ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				  points_mask,
				  ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
				  ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
				  1);

	if (ret < 0) {
		drm_err(display->drm, "Failed to disable qgv points (0x%x) points: 0x%x\n",
			ret, points_mask);
		return ret;
	}

	/*
	 * NOTE(review): the section below references 'qi' and 'i', which are
	 * not declared in this function — it looks like PSF-point probing
	 * code from a different function was spliced in here; confirm
	 * against the original source.
	 */
	if (qi->num_psf_points > 0) {
		ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
		if (ret) {
			drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
			qi->num_psf_points = 0;
		}

		for (i = 0; i < qi->num_psf_points; i++)
			drm_dbg_kms(display->drm, "PSF GV %d: CLK=%d \n",
				    i, qi->psf_points[i].clk);
	}

	return 0;
}
/*
 * Compute the PSF GV bandwidth for a given PSF clock value.
 *
 * @clk is in multiples of 16.666 MHz (i.e. 100/6 MHz). According to BSpec
 * the PSF GV bandwidth is BW = 64 * clk * 16.666 MHz, computed here as
 * 64 * clk * 100 / 6 with round-to-nearest to avoid truncation error.
 */
static int adl_calc_psf_bw(int clk)
{
	return DIV_ROUND_CLOSEST(64 * clk * 100, 6);
}
/*
 * Probe the QGV points for ICL-era platforms and derive the initial SAGV
 * status from the number of points reported.
 *
 * NOTE(review): this body appears truncated — the code computing the
 * per-group 'bi' and per-point 'j' values printed by the drm_dbg_kms()
 * below is missing from this view; confirm against the original source.
 */
staticint icl_get_bw_info(struct intel_display *display, conststruct dram_info *dram_info, conststruct intel_sa_info *sa)
{ struct intel_qgv_info qi = {}; bool is_y_tile = true; /* assume y tile may be used */ int num_channels = max_t(u8, 1, dram_info->num_channels); int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw; int num_groups = ARRAY_SIZE(display->bw.max); int i, ret;
/* Read the QGV points from PCode; without them no bandwidth limits apply. */
ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile); if (ret) {
drm_dbg_kms(display->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret;
}
drm_dbg_kms(display->drm, "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
i, j, bi->num_planes, bi->deratedbw[j]);
}
} /* * In case if SAGV is disabled in BIOS, we always get 1 * SAGV point, but we can't send PCode commands to restrict it * as it will fail and pointless anyway.
*/ if (qi.num_points == 1)
display->sagv.status = I915_SAGV_NOT_CONTROLLED; else
display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
/*
 * Probe the QGV points for TGL+ platforms, clamp the channel count to the
 * maximum reported by PCode, and derive the initial SAGV status.
 *
 * NOTE(review): this body appears truncated — several locals (ipqdepth,
 * dclk_max, maxdebw, peakbw, clperchgroup, num_groups) are declared but
 * the code using them is missing from this view; confirm against the
 * original source.
 */
staticint tgl_get_bw_info(struct intel_display *display, conststruct dram_info *dram_info, conststruct intel_sa_info *sa)
{ struct intel_qgv_info qi = {}; bool is_y_tile = true; /* assume y tile may be used */ int num_channels = max_t(u8, 1, dram_info->num_channels); int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw, peakbw; int clperchgroup; int num_groups = ARRAY_SIZE(display->bw.max); int i, ret;
/* Read the QGV points from PCode; without them no bandwidth limits apply. */
ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile); if (ret) {
drm_dbg_kms(display->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret;
}
/* Clamp the channel count to what PCode advertises as the maximum. */
if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
drm_warn(display->drm, "Number of channels exceeds max number of channels."); if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
/* * In case if SAGV is disabled in BIOS, we always get 1 * SAGV point, but we can't send PCode commands to restrict it * as it will fail and pointless anyway.
*/ if (qi.num_points == 1)
display->sagv.status = I915_SAGV_NOT_CONTROLLED; else
display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
/*
 * DG2 has no SAGV/QGV points; fill every plane group with a single dummy
 * QGV point carrying the platform's constant max bandwidth (38 GB/s on
 * DG2-G11, 50 GB/s on DG2-G10).
 *
 * NOTE(review): the drm_dbg_kms() after the loop references 'peakbw' data
 * and runs with 'i' already equal to num_groups (out of the loop's valid
 * range) — it looks like debug code from a different (QGV-based) path was
 * spliced in here; confirm against the original source.
 */
staticvoid dg2_get_bw_info(struct intel_display *display)
{ unsignedint deratedbw = display->platform.dg2_g11 ? 38000 : 50000; int num_groups = ARRAY_SIZE(display->bw.max); int i;
/* * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth * that doesn't depend on the number of planes enabled. So fill all the * plane group with constant bw information for uniformity with other * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth, * whereas DG2-G11 platforms have 38 GB/s.
*/ for (i = 0; i < num_groups; i++) { struct intel_bw_info *bi = &display->bw.max[i];
bi->num_planes = 1; /* Need only one dummy QGV point per group */
bi->num_qgv_points = 1;
bi->deratedbw[0] = deratedbw;
}
drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
i, display->bw.max[0].deratedbw[i],
display->bw.max[0].peakbw[i]);
}
/* Bandwidth does not depend on # of planes; set all groups the same */
display->bw.max[0].num_planes = 1;
display->bw.max[0].num_qgv_points = qi.num_points; for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
memcpy(&display->bw.max[i], &display->bw.max[0], sizeof(display->bw.max[0]));
/* * Xe2_HPD should always have exactly two QGV points representing * battery and plugged-in operation.
*/
drm_WARN_ON(display->drm, qi.num_points != 2);
display->sagv.status = I915_SAGV_ENABLED;
return 0;
}
staticunsignedint icl_max_bw_index(struct intel_display *display, int num_planes, int qgv_point)
{ int i;
/* * Let's return max bw for 0 planes
*/
num_planes = max(1, num_planes);
for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) { conststruct intel_bw_info *bi =
&display->bw.max[i];
/* * Pcode will not expose all QGV points when * SAGV is forced to off/min/med/max.
*/ if (qgv_point >= bi->num_qgv_points) return UINT_MAX;
if (num_planes >= bi->num_planes) return i;
}
return UINT_MAX;
}
/*
 * Find the index into display->bw.max[] applicable to the given number of
 * active planes and QGV point, scanning from the highest group downwards
 * (TGL ordering).
 *
 * NOTE(review): this function appears truncated — the loop body is
 * incomplete and neither the loop nor the function is closed in this view;
 * confirm against the original source.
 */
staticunsignedint tgl_max_bw_index(struct intel_display *display, int num_planes, int qgv_point)
{ int i;
/* * Let's return max bw for 0 planes
*/
num_planes = max(1, num_planes);
for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) { conststruct intel_bw_info *bi =
&display->bw.max[i];
/* * Pcode will not expose all QGV points when * SAGV is forced to off/min/med/max.
*/ if (qgv_point >= bi->num_qgv_points) return UINT_MAX;
staticunsignedint intel_bw_crtc_num_active_planes(conststruct intel_crtc_state *crtc_state)
{ /* * We assume cursors are small enough * to not not cause bandwidth problems.
*/ return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
}
for_each_plane_id_on_crtc(crtc, plane_id) { /* * We assume cursors are small enough * to not not cause bandwidth problems.
*/ if (plane_id == PLANE_CURSOR) continue;
data_rate += crtc_state->data_rate[plane_id];
if (DISPLAY_VER(display) < 11)
data_rate += crtc_state->data_rate_y[plane_id];
}
for (i = 0; i < num_qgv_points; i++) { unsignedint max_data_rate =
icl_qgv_bw(display, num_active_planes, i);
/* * We need to know which qgv point gives us * maximum bandwidth in order to disable SAGV * if we find that we exceed SAGV block time * with watermarks. By that moment we already * have those, as it is calculated earlier in * intel_atomic_check,
*/ if (max_data_rate > max_bw) {
max_bw_point = BIT(i);
max_bw = max_data_rate;
}
}
/* * Restrict required qgv points before updating the configuration. * According to BSpec we can't mask and unmask qgv points at the same * time. Also masking should be done before updating the configuration * and unmasking afterwards.
*/
icl_pcode_restrict_qgv_points(display, new_mask);
}
/* * Allow required qgv points after updating the configuration. * According to BSpec we can't mask and unmask qgv points at the same * time. Also masking should be done before updating the configuration * and unmasking afterwards.
*/
icl_pcode_restrict_qgv_points(display, new_mask);
}
ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret;
/* * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's * for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is * not enabled. PM Demand code will clamp the value for the register
*/ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
new_bw_state->qgv_point_peakbw = U16_MAX;
drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw."); return 0;
}
/* * Find the best QGV point by comparing the data_rate with max data rate * offered per plane group
*/ for (i = 0; i < num_qgv_points; i++) { unsignedint bw_index =
tgl_max_bw_index(display, num_active_planes, i); unsignedint max_data_rate;
if (bw_index >= ARRAY_SIZE(display->bw.max)) continue;
drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
i, max_data_rate, data_rate, qgv_peak_bw);
}
drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
qgv_peak_bw, data_rate);
/* * The display configuration cannot be supported if no QGV point * satisfying the required data rate is found
*/ if (qgv_peak_bw == 0) {
drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
data_rate, num_active_planes); return -EINVAL;
}
/* MTL PM DEMAND expects QGV BW parameter in multiples of 100 mbps */
new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);
ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret;
for (i = 0; i < num_qgv_points; i++) { unsignedint max_data_rate = icl_qgv_bw(display,
num_active_planes, i); if (max_data_rate >= data_rate)
qgv_points |= BIT(i);
drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
i, max_data_rate, data_rate);
}
for (i = 0; i < num_psf_gv_points; i++) { unsignedint max_data_rate = adl_psf_bw(display, i);
if (max_data_rate >= data_rate)
psf_points |= BIT(i);
drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d" " required %d\n",
i, max_data_rate, data_rate);
}
/* * BSpec states that we always should have at least one allowed point * left, so if we couldn't - simply reject the configuration for obvious * reasons.
*/ if (qgv_points == 0) {
drm_dbg_kms(display->drm, "No QGV points provide sufficient memory" " bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes); return -EINVAL;
}
if (num_psf_gv_points > 0 && psf_points == 0) {
drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory" " bandwidth %d for display configuration(%d active planes).\n",
data_rate, num_active_planes); return -EINVAL;
}
/* * Leave only single point with highest bandwidth, if * we can't enable SAGV due to the increased memory latency it may * cause.
*/ if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
qgv_points);
}
/* * We store the ones which need to be masked as that is what PCode * actually accepts as a parameter.
*/
new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
qgv_points,
psf_points); /* * If the actual mask had changed we need to make sure that * the commits are serialized(in case this is a nomodeset, nonblocking)
*/ if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
ret = intel_atomic_serialize_global_state(&new_bw_state->base); if (ret) return ret;
}
/* * The arbiter can only really guarantee an * equal share of the total bw to each plane.
*/
for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
dbuf_bw->active_planes[slice] |= BIT(plane_id);
}
}
for_each_plane_id_on_crtc(crtc, plane_id) { /* * We assume cursors are small enough * to not cause bandwidth problems.
*/ if (plane_id == PLANE_CURSOR) continue;
/* * The arbiter can only really guarantee an * equal share of the total bw to each plane.
*/
for_each_pipe(display, pipe) { conststruct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) { int ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret;
}
/* * No need to check against the cdclk state if * the min cdclk doesn't increase. * * Ie. we only ever increase the cdclk due to bandwidth * requirements. This can reduce back and forth * display blinking due to constant cdclk changes.
*/ if (new_min_cdclk <= old_min_cdclk) return 0;
cdclk_state = intel_atomic_get_cdclk_state(state); if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state);
/* * No need to recalculate the cdclk state if * the min cdclk doesn't increase. * * Ie. we only ever increase the cdclk due to bandwidth * requirements. This can reduce back and forth * display blinking due to constant cdclk changes.
*/ if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state)) return 0;
drm_dbg_kms(display->drm, "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state));
*need_cdclk_calc = true;
/* * Avoid locking the bw state when * nothing significant has changed.
*/ if (old_data_rate == new_data_rate &&
old_active_planes == new_active_planes) continue;
new_bw_state = intel_atomic_get_bw_state(state); if (IS_ERR(new_bw_state)) return PTR_ERR(new_bw_state);
drm_dbg_kms(display->drm, "[CRTC:%d:%s] data rate %u num active planes %u\n",
crtc->base.base.id, crtc->base.name,
new_bw_state->data_rate[crtc->pipe],
new_bw_state->num_active_planes[crtc->pipe]);
}
drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
pipe_name(crtc->pipe),
bw_state->data_rate[crtc->pipe],
bw_state->num_active_planes[crtc->pipe]);
}
/* * Limit this only if we have SAGV. And for Display version 14 onwards * sagv is handled though pmdemand requests
*/ if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
icl_force_disable_sagv(display, state);
/*
 * NOTE(review): the following disclaimer is extraneous webpage boilerplate
 * that is not part of the source file (translated from German):
 * "The information on this web page has been compiled carefully and to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */