// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved. * Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com>
*/
/* * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at * the end of VFP. Translate the porch values relative to the line * counter positions.
*/
for (i = 0; i < cstate->num_mixers; i++) {
mixer[i].mixer_op_mode = 0; if (mixer[i].lm_ctl->ops.clear_all_blendstages)
mixer[i].lm_ctl->ops.clear_all_blendstages(
mixer[i].lm_ctl); if (mixer[i].lm_ctl->ops.set_active_fetch_pipes)
mixer[i].lm_ctl->ops.set_active_fetch_pipes(mixer[i].lm_ctl, NULL); if (mixer[i].lm_ctl->ops.set_active_pipes)
mixer[i].lm_ctl->ops.set_active_pipes(mixer[i].lm_ctl, NULL);
if (mixer[i].hw_lm->ops.clear_all_blendstages)
mixer[i].hw_lm->ops.clear_all_blendstages(mixer[i].hw_lm);
}
if (ctl->ops.setup_blendstage)
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
&stage_cfg);
if (lm->ops.setup_blendstage)
lm->ops.setup_blendstage(lm, mixer[i].hw_lm->idx,
&stage_cfg);
}
}
/** * _dpu_crtc_complete_flip - signal pending page_flip events * Any pending vblank events are added to the vblank_event_list * so that the next vblank interrupt shall signal them. * However PAGE_FLIP events are not handled through the vblank_event_list. * This API signals any pending PAGE_FLIP events requested through * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the dpu_crtc->event. * @crtc: Pointer to drm crtc structure
*/ staticvoid _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); struct drm_device *dev = crtc->dev; unsignedlong flags;
/**
 * dpu_crtc_get_intf_mode - get interface mode of the given crtc
 * @crtc: Pointer to crtc
 *
 * Return: the INTF_MODE of the first encoder attached to @crtc, or
 * INTF_MODE_NONE if no encoder is attached.
 */
enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}
/** * dpu_crtc_vblank_callback - called on vblank irq, issues completion events * @crtc: Pointer to drm crtc object
*/ void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
/* keep statistics on vblank callback - with auto reset via debugfs */ if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
dpu_crtc->vblank_cb_time = ktime_get(); else
dpu_crtc->vblank_cb_count++;
if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
| DPU_ENCODER_FRAME_EVENT_ERROR
| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
if (atomic_read(&dpu_crtc->frame_pending) < 1) { /* ignore vblank when not pending */
} elseif (atomic_dec_return(&dpu_crtc->frame_pending) == 0) { /* release bandwidth and other resources */
trace_dpu_crtc_frame_event_done(DRMID(crtc),
fevent->event);
dpu_core_perf_crtc_release_bw(crtc);
} else {
trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
fevent->event);
}
if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
| DPU_ENCODER_FRAME_EVENT_ERROR))
frame_done = true;
}
if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
crtc->base.id, ktime_to_ns(fevent->ts));
if (frame_done)
complete_all(&dpu_crtc->frame_done_comp);
/** * dpu_crtc_frame_event_cb - crtc frame event callback API * @crtc: Pointer to crtc * @event: Event to process * * Encoder may call this for different events from different context - IRQ, * user thread, commit_thread, etc. Each event should be carefully reviewed and * should be processed in proper task context to avoid scheduling delay or * properly manage the irq context's bottom half processing.
*/ void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event)
{ struct dpu_crtc *dpu_crtc; struct msm_drm_private *priv; struct dpu_crtc_frame_event *fevent; unsignedlong flags;
u32 crtc_id;
/* Nothing to do on idle event */ if (event & DPU_ENCODER_FRAME_EVENT_IDLE) return;
/* if we cannot merge 2 LMs (no 3d mux) better to fail earlier * before even checking the width after the split
*/ if (!dpu_kms->catalog->caps->has_3d_merge &&
adj_mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width) return -E2BIG;
for (i = 0; i < cstate->num_mixers; i++) { struct drm_rect *r = &cstate->lm_bounds[i];
r->x1 = crtc_split_width * i;
r->y1 = 0;
r->x2 = r->x1 + crtc_split_width;
r->y2 = adj_mode->vdisplay;
trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width) return -E2BIG;
}
/* encoder will trigger pending mask now */
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
dpu_encoder_trigger_kickoff_pending(encoder);
/* * If no mixers have been allocated in dpu_crtc_atomic_check(), * it means we are trying to flush a CRTC whose state is disabled: * nothing else needs to be done.
*/ if (unlikely(!cstate->num_mixers)) return;
_dpu_crtc_blend_setup(crtc);
_dpu_crtc_setup_cp_blocks(crtc);
/* * PP_DONE irq is only used by command mode for now. * It is better to request pending before FLUSH and START trigger * to make sure no pp_done irq missed. * This is safe because no pp_done will happen before SW trigger * in command mode.
*/
}
/* * If no mixers has been allocated in dpu_crtc_atomic_check(), * it means we are trying to flush a CRTC whose state is disabled: * nothing else needs to be done.
*/ if (unlikely(!cstate->num_mixers)) return;
/* update performance setting before crtc kickoff */
dpu_core_perf_crtc_update(crtc, 1);
/* * Final plane updates: Give each plane a chance to complete all * required writes/flushing before crtc's "flush * everything" call below.
*/
drm_atomic_crtc_for_each_plane(plane, crtc) { if (dpu_crtc->smmu_state.transition_error)
dpu_plane_set_error(plane, true);
dpu_plane_flush(plane);
}
/* * Kickoff real time encoder last as it's the encoder that * will do the flush
*/
dpu_encoder_kickoff(wb_encoder);
dpu_encoder_kickoff(rt_encoder);
/* Don't start frame done timers until the kickoffs have finished */
dpu_encoder_start_frame_done_timer(wb_encoder);
dpu_encoder_start_frame_done_timer(rt_encoder);
return 0;
}
/** * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc * @crtc: Pointer to drm crtc object
*/ void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{ struct drm_encoder *encoder; struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc); struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
/* * If no mixers has been allocated in dpu_crtc_atomic_check(), * it means we are trying to start a CRTC whose state is disabled: * nothing else needs to be done.
*/ if (unlikely(!cstate->num_mixers)) return;
DPU_ATRACE_BEGIN("crtc_commit");
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc->state->encoder_mask) { if (!dpu_encoder_is_valid_for_commit(encoder)) {
DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n"); goto end;
}
}
if (drm_crtc_in_clone_mode(crtc->state)) { if (dpu_crtc_kickoff_clone_mode(crtc)) goto end;
} else { /* * Encoder will flush/start now, unless it has a tx pending. * If so, it may delay and flush at an irq event (e.g. ppdone)
*/
drm_for_each_encoder_mask(encoder, crtc->dev,
crtc->state->encoder_mask)
dpu_encoder_prepare_for_kickoff(encoder);
for (i = 0; i < cstate->num_mixers; i++) {
drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0); if (cstate->mixers[i].hw_dspp)
drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
}
}
/* If disable is triggered while in self refresh mode, * reset the encoder software state so that in enable * it won't trigger a warn while assigning crtc.
*/ if (old_crtc_state->self_refresh_active) {
drm_for_each_encoder_mask(encoder, crtc->dev,
old_crtc_state->encoder_mask) {
dpu_encoder_assign_crtc(encoder, NULL);
} return;
}
drm_for_each_encoder_mask(encoder, crtc->dev,
old_crtc_state->encoder_mask) { /* in video mode, we hold an extra bandwidth reference * as we cannot drop bandwidth at frame-done if any * crtc is being used in video mode.
*/ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
release_bandwidth = true;
/* * If disable is triggered during psr active(e.g: screen dim in PSR), * we will need encoder->crtc connection to process the device sleep & * preserve it during psr sequence.
*/ if (!crtc->state->self_refresh_active)
dpu_encoder_assign_crtc(encoder, NULL);
}
/* wait for frame_event_done completion */ if (_dpu_crtc_wait_for_frame_done(crtc))
DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
crtc->base.id,
atomic_read(&dpu_crtc->frame_pending));
drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) { /* in video mode, we hold an extra bandwidth reference * as we cannot drop bandwidth at frame-done if any * crtc is being used in video mode.
*/ if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
request_bandwidth = true;
}
if (request_bandwidth)
atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
/* * Datapath topology selection * * Dual display * 2 LM, 2 INTF ( Split display using 2 interfaces) * * Single display * 1 LM, 1 INTF * 2 LM, 1 INTF (stream merge to support high resolution interfaces) * * If DSC is enabled, use 2 LMs for 2:2:1 topology * * Add dspps to the reservation requirements if ctm is requested * * Only hardcode num_lm to 2 for cases where num_intf == 2 and CWB is not * enabled. This is because in cases where CWB is enabled, num_intf will * count both the WB and real-time phys encoders. * * For non-DSC CWB usecases, have the num_lm be decided by the * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
*/
/* * Release and Allocate resources on every modeset
*/
global_state = dpu_kms_get_global_state(crtc_state->state); if (IS_ERR(global_state)) return PTR_ERR(global_state);
dpu_rm_release(global_state, crtc);
if (!crtc_state->enable) return 0;
topology = dpu_crtc_get_topology(crtc, dpu_kms, crtc_state);
ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
crtc_state->crtc, &topology); if (ret) return ret;
for (i = 0; i < num_lm; i++) { int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]); if (i < num_dspp)
cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
}
cstate->num_mixers = num_lm;
return 0;
}
/** * dpu_crtc_check_mode_changed: check if full modeset is required * @old_crtc_state: Previous CRTC state * @new_crtc_state: Corresponding CRTC state to be checked * * Check if the changes in the object properties demand full mode set.
*/ int dpu_crtc_check_mode_changed(struct drm_crtc_state *old_crtc_state, struct drm_crtc_state *new_crtc_state)
{ struct drm_encoder *drm_enc; struct drm_crtc *crtc = new_crtc_state->crtc; bool clone_mode_enabled = drm_crtc_in_clone_mode(old_crtc_state); bool clone_mode_requested = drm_crtc_in_clone_mode(new_crtc_state);
DRM_DEBUG_ATOMIC("%d\n", crtc->base.id);
/* there might be cases where encoder needs a modeset too */
drm_for_each_encoder_mask(drm_enc, crtc->dev, new_crtc_state->encoder_mask) { if (dpu_encoder_needs_modeset(drm_enc, new_crtc_state->state))
new_crtc_state->mode_changed = true;
}
/* don't reallocate resources if only ACTIVE has been changed */ if (crtc_state->mode_changed || crtc_state->connectors_changed) {
rc = dpu_crtc_assign_resources(crtc, crtc_state); if (rc < 0) return rc;
}
if (dpu_use_virtual_planes &&
(crtc_state->planes_changed || crtc_state->zpos_changed)) {
rc = dpu_crtc_reassign_planes(crtc, crtc_state); if (rc < 0) return rc;
}
/* if there is no 3d_mux block we cannot merge LMs so we cannot * split the large layer into 2 LMs, filter out such modes
*/ if (!dpu_kms->catalog->caps->has_3d_merge &&
mode->hdisplay > dpu_kms->catalog->caps->max_mixer_width) return MODE_BAD_HVALUE;
if (dpu_kms->catalog->caps->has_3d_merge)
adjusted_mode_clk /= 2;
/* * The given mode, adjusted for the perf clock factor, should not exceed * the max core clock rate
*/ if (dpu_kms->perf.max_core_clk_rate < adjusted_mode_clk * 1000) return MODE_CLOCK_HIGH;
/* * max crtc width is equal to the max mixer width * 2 and max height is 4K
*/ return drm_mode_validate_size(mode,
2 * dpu_kms->catalog->caps->max_mixer_width,
4096);
}
/** * dpu_crtc_vblank - enable or disable vblanks for this crtc * @crtc: Pointer to drm crtc object * @en: true to enable vblanks, false to disable
*/ int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc); struct drm_encoder *enc;
/* * Normally we would iterate through encoder_mask in crtc state to find * attached encoders. In this case, we might be disabling vblank _after_ * encoder_mask has been cleared. * * Instead, we "assign" a crtc to the encoder in enable and clear it in * disable (which is also after encoder_mask is cleared). So instead of * using encoder mask, we'll ask the encoder to toggle itself iff it's * currently assigned to our crtc. * * Note also that this function cannot be called while crtc is disabled * since we use drm_crtc_vblank_on/off. So we don't need to worry * about the assigned crtcs being inconsistent with the current state * (which means no need to worry about modeset locks).
*/
list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
dpu_crtc);
for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
list_add(&dpu_crtc->frame_events[i].list,
&dpu_crtc->frame_event_list);
kthread_init_work(&dpu_crtc->frame_events[i].work,
dpu_crtc_frame_event_work);
}
ret = drm_self_refresh_helper_init(crtc); if (ret) {
DPU_ERROR("Failed to initialize %s with self-refresh helpers %d\n",
crtc->name, ret); return ERR_PTR(ret);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.