// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2013 Red Hat * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. * * Author: Rob Clark <robdclark@gmail.com>
*/
/*
 * Two to anticipate panels that can do cmd/vid dynamic switching;
 * plan is to create all possible physical encoder types, and switch between
 * them at runtime
 */
#define NUM_PHYS_ENCODER_TYPES 2

/* timeout in frames waiting for frame done */
#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
/**
 * enum dpu_enc_rc_events - events for resource control state machine
 * @DPU_ENC_RC_EVENT_KICKOFF:
 *	This event happens at NORMAL priority.
 *	Event that signals the start of the transfer. When this event is
 *	received, enable MDP/DSI core clocks. Regardless of the previous
 *	state, the resource should be in ON state at the end of this event.
 * @DPU_ENC_RC_EVENT_FRAME_DONE:
 *	This event happens at INTERRUPT level.
 *	Event signals the end of the data transfer after the PP FRAME_DONE
 *	event. At the end of this event, a delayed work is scheduled to go to
 *	IDLE_PC state after IDLE_TIMEOUT time.
 * @DPU_ENC_RC_EVENT_PRE_STOP:
 *	This event happens at NORMAL priority.
 *	This event, when received during the ON state, leave the RC STATE
 *	in the PRE_OFF state. It should be followed by the STOP event as
 *	part of encoder disable.
 *	If received during IDLE or OFF states, it will do nothing.
 * @DPU_ENC_RC_EVENT_STOP:
 *	This event happens at NORMAL priority.
 *	When this event is received, disable all the MDP/DSI core clocks, and
 *	disable IRQs. It should be called from the PRE_OFF or IDLE states.
 *	IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
 *	PRE_OFF is expected when PRE_STOP was executed during the ON state.
 *	Resource state should be in OFF at the end of the event.
 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
 *	This event happens at NORMAL priority from a work item.
 *	Event signals that there were no frame updates for IDLE_TIMEOUT time.
 *	This would disable MDP/DSI core clocks and change the resource state
 *	to IDLE.
 */
enum dpu_enc_rc_events {
	DPU_ENC_RC_EVENT_KICKOFF = 1,
	DPU_ENC_RC_EVENT_FRAME_DONE,
	DPU_ENC_RC_EVENT_PRE_STOP,
	DPU_ENC_RC_EVENT_STOP,
	DPU_ENC_RC_EVENT_ENTER_IDLE
};
/*
 * enum dpu_enc_rc_states - states that the resource control maintains
 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
 */
enum dpu_enc_rc_states {
	DPU_ENC_RC_STATE_OFF,
	DPU_ENC_RC_STATE_PRE_OFF,
	DPU_ENC_RC_STATE_ON,
	DPU_ENC_RC_STATE_IDLE
};
/** * struct dpu_encoder_virt - virtual encoder. Container of one or more physical * encoders. Virtual encoder manages one "logical" display. Physical * encoders manage one intf block, tied to a specific panel/sub-panel. * Virtual encoder defers as much as possible to the physical encoders. * Virtual encoder registers itself with the DRM Framework as the encoder. * @base: drm_encoder base class for registration with DRM * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes * @enabled: True if the encoder is active, protected by enc_lock * @commit_done_timedout: True if there has been a timeout on commit after * enabling the encoder. * @num_phys_encs: Actual number of physical encoders contained. * @phys_encs: Container of physical encoders managed. * @cur_master: Pointer to the current master in this mode. Optimization * Only valid after enable. Cleared as disable. * @cur_slave: As above but for the slave encoder. * @hw_pp: Handle to the pingpong blocks used for the display. No. * pingpong blocks can be different than num_phys_encs. * @hw_cwb: Handle to the CWB muxes used for concurrent writeback * display. Number of CWB muxes can be different than * num_phys_encs. * @hw_dsc: Handle to the DSC blocks used for the display. * @dsc_mask: Bitmask of used DSC blocks. * @cwb_mask: Bitmask of used CWB muxes * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped * for partial update right-only cases, such as pingpong * split where virtual pingpong does not generate IRQs * @crtc: Pointer to the currently assigned crtc. Normally you * would use crtc->state->encoder_mask to determine the * link between encoder/crtc. However in this case we need * to track crtc in the disable() hook which is called * _after_ encoder_mask is cleared. 
* @connector: If a mode is set, cached pointer to the active connector * @enc_lock: Lock around physical encoder * create/destroy/enable/disable * @frame_busy_mask: Bitmask tracking which phys_enc we are still * busy processing current command. * Bit0 = phys_encs[0] etc. * @frame_done_timeout_ms: frame done timeout in ms * @frame_done_timeout_cnt: atomic counter tracking the number of frame * done timeouts * @frame_done_timer: watchdog timer for frame done event * @disp_info: local copy of msm_display_info struct * @idle_pc_supported: indicate if idle power collaps is supported * @rc_lock: resource control mutex lock to protect * virt encoder over various state changes * @rc_state: resource controller state * @delayed_off_work: delayed worker to schedule disabling of * clks and resources after IDLE_TIMEOUT time. * @topology: topology of the display * @idle_timeout: idle timeout duration in milliseconds * @wide_bus_en: wide bus is enabled on this interface * @dsc: drm_dsc_config pointer, for DSC-enabled encoders
*/ struct dpu_encoder_virt { struct drm_encoder base;
spinlock_t enc_spinlock;
/** * dpu_encoder_is_dsc_enabled - indicate whether dsc is enabled * for the encoder. * @drm_enc: Pointer to previously created drm encoder structure
*/ bool dpu_encoder_is_dsc_enabled(conststruct drm_encoder *drm_enc)
{ conststruct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
return dpu_enc->dsc ? true : false;
}
/** * dpu_encoder_get_crc_values_cnt - get number of physical encoders contained * in virtual encoder that can collect CRC values * @drm_enc: Pointer to previously created drm encoder structure * Returns: Number of physical encoders for given drm encoder
*/ int dpu_encoder_get_crc_values_cnt(conststruct drm_encoder *drm_enc)
{ struct dpu_encoder_virt *dpu_enc; int i, num_intf = 0;
dpu_enc = to_dpu_encoder_virt(drm_enc);
for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->hw_intf && phys->hw_intf->ops.setup_misr
&& phys->hw_intf->ops.collect_misr)
num_intf++;
}
return num_intf;
}
/** * dpu_encoder_setup_misr - enable misr calculations * @drm_enc: Pointer to previously created drm encoder structure
*/ void dpu_encoder_setup_misr(conststruct drm_encoder *drm_enc)
{ struct dpu_encoder_virt *dpu_enc;
int i;
dpu_enc = to_dpu_encoder_virt(drm_enc);
for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr) continue;
phys->hw_intf->ops.setup_misr(phys->hw_intf);
}
}
/** * dpu_encoder_get_crc - get the crc value from interface blocks * @drm_enc: Pointer to previously created drm encoder structure * @crcs: array to fill with CRC data * @pos: offset into the @crcs array * Returns: 0 on success, error otherwise
*/ int dpu_encoder_get_crc(conststruct drm_encoder *drm_enc, u32 *crcs, int pos)
{ struct dpu_encoder_virt *dpu_enc;
int i, rc = 0, entries_added = 0;
if (!drm_enc->crtc) {
DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index); return -EINVAL;
}
dpu_enc = to_dpu_encoder_virt(drm_enc);
for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr) continue;
/** * dpu_encoder_helper_wait_for_irq - utility to wait on an irq. * note: will call dpu_encoder_helper_wait_for_irq on timeout * @phys_enc: Pointer to physical encoder structure * @irq_idx: IRQ index * @func: IRQ callback to be called in case of timeout * @wait_info: wait info struct * @return: 0 or -ERROR
*/ int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc, unsignedint irq_idx, void (*func)(void *arg), struct dpu_encoder_wait_info *wait_info)
{
u32 irq_status; int ret;
if (!wait_info) {
DPU_ERROR("invalid params\n"); return -EINVAL;
} /* note: do master / slave checking outside */
/* return EWOULDBLOCK since we know the wait isn't necessary */ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
DRM_ERROR("encoder is disabled id=%u, callback=%ps, IRQ=[%d, %d]\n",
DRMID(phys_enc->parent), func,
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx)); return -EWOULDBLOCK;
}
/**
 * dpu_encoder_get_vsync_count - get vsync count for the encoder.
 * @drm_enc: Pointer to previously created drm encoder structure
 */
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);

	/* only the master physical encoder tracks the vsync counter */
	if (!dpu_enc || !dpu_enc->cur_master)
		return 0;

	return atomic_read(&dpu_enc->cur_master->vsync_cnt);
}
/** * dpu_encoder_get_linecount - get interface line count for the encoder. * @drm_enc: Pointer to previously created drm encoder structure
*/ int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
{ struct dpu_encoder_virt *dpu_enc; struct dpu_encoder_phys *phys; int linecount = 0;
/** * disable split modes since encoder will be operating in as the only * encoder, either for the entire use case in the case of, for example, * single DSI, or for this frame in the case of left/right only partial * update.
*/ if (phys_enc->split_role == ENC_ROLE_SOLO) { if (hw_mdptop->ops.setup_split_pipe)
hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg); return;
}
if (cfg.en && phys_enc->ops.needs_single_flush &&
phys_enc->ops.needs_single_flush(phys_enc))
cfg.split_flush_en = true;
if (phys_enc->split_role == ENC_ROLE_MASTER) {
DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
if (hw_mdptop->ops.setup_split_pipe)
hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
}
}
/**
 * dpu_encoder_use_dsc_merge - returns true if the encoder uses DSC merge topology.
 * @drm_enc: Pointer to previously created drm encoder structure
 */
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int intf_count = 0, num_dsc = 0;
	int i;

	/* count populated physical encoders (one per interface) */
	for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
		intf_count += dpu_enc->phys_encs[i] ? 1 : 0;

	/* count allocated DSC hardware blocks */
	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		num_dsc += dpu_enc->hw_dsc[i] ? 1 : 0;

	/* merge topology: more DSC blocks than interfaces */
	return num_dsc > 0 && num_dsc > intf_count;
}
/** * dpu_encoder_get_dsc_config - get DSC config for the DPU encoder * This helper function is used by physical encoder to get DSC config * used for this encoder. * @drm_enc: Pointer to encoder structure
*/ struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
{ struct msm_drm_private *priv = drm_enc->dev->dev_private; struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); int index = dpu_enc->disp_info.h_tile_instance[0];
if (dpu_enc->disp_info.intf_type == INTF_DSI) return msm_dsi_get_dsc_config(priv->kms->dsi[index]);
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++) if (dpu_enc->phys_encs[i])
topology->num_intf++;
dsc = dpu_encoder_get_dsc_config(drm_enc);
/* We only support 2 DSC mode (with 2 LM and 1 INTF) */ if (dsc) { /* * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces * when Display Stream Compression (DSC) is enabled, * and when enough DSC blocks are available. * This is power-optimal and can drive up to (including) 4k * screens.
*/
WARN(topology->num_intf > 2, "DSC topology cannot support more than 2 interfaces\n"); if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
topology->num_dsc = 2; else
topology->num_dsc = 1;
}
connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); if (!connector) return;
conn_state = drm_atomic_get_new_connector_state(state, connector); if (!conn_state) return;
/* * Use CDM only for writeback or DP at the moment as other interfaces cannot handle it. * If writeback itself cannot handle cdm for some reason it will fail in its atomic_check() * earlier.
*/ if (disp_info->intf_type == INTF_WB && conn_state->writeback_job) {
fb = conn_state->writeback_job->fb;
if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb)))
topology->num_cdm++;
} elseif (disp_info->intf_type == INTF_DP) { if (msm_dp_is_yuv_420_enabled(priv->kms->dp[disp_info->h_tile_instance[0]],
adj_mode))
topology->num_cdm++;
}
}
connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc); if (!connector) returnfalse;
conn_state = drm_atomic_get_new_connector_state(state, connector); if (!conn_state) returnfalse;
/** * These checks are duplicated from dpu_encoder_update_topology() since * CRTC and encoder don't hold topology information
*/ if (dpu_enc->disp_info.intf_type == INTF_WB && conn_state->writeback_job) {
fb = conn_state->writeback_job->fb; if (fb && MSM_FORMAT_IS_YUV(msm_framebuffer_format(fb))) { if (!dpu_enc->cur_master->hw_cdm) returntrue;
} else { if (dpu_enc->cur_master->hw_cdm) returntrue;
}
}
/* * when idle_pc is not supported, process only KICKOFF, STOP and MODESET * events and return early for other events (ie wb display).
*/ if (!dpu_enc->idle_pc_supported &&
(sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
sw_event != DPU_ENC_RC_EVENT_STOP &&
sw_event != DPU_ENC_RC_EVENT_PRE_STOP)) return 0;
switch (sw_event) { case DPU_ENC_RC_EVENT_KICKOFF: /* cancel delayed off work, if any */ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
mutex_lock(&dpu_enc->rc_lock);
/* return if the resource control is already in ON state */ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in ON state\n",
DRMID(drm_enc), sw_event);
mutex_unlock(&dpu_enc->rc_lock); return 0;
} elseif (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
DRM_DEBUG_ATOMIC("id;%u, sw_event:%d, rc in state %d\n",
DRMID(drm_enc), sw_event,
dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock); return -EINVAL;
}
if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
_dpu_encoder_irq_enable(drm_enc); else
_dpu_encoder_resource_enable(drm_enc);
case DPU_ENC_RC_EVENT_FRAME_DONE: /* * mutex lock is not used as this event happens at interrupt * context. And locking is not required as, the other events * like KICKOFF and STOP does a wait-for-idle before executing * the resource_control
*/ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
DRMID(drm_enc), sw_event,
dpu_enc->rc_state); return -EINVAL;
}
/* * schedule off work item only when there are no * frames pending
*/ if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
DRM_DEBUG_KMS("id:%d skip schedule work\n",
DRMID(drm_enc)); return 0;
}
case DPU_ENC_RC_EVENT_PRE_STOP: /* cancel delayed off work, if any */ if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
sw_event);
mutex_lock(&dpu_enc->rc_lock);
if (is_vid_mode &&
dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
_dpu_encoder_irq_enable(drm_enc);
} /* skip if is already OFF or IDLE, resources are off already */ elseif (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
DRMID(drm_enc), sw_event,
dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock); return 0;
}
case DPU_ENC_RC_EVENT_STOP:
mutex_lock(&dpu_enc->rc_lock);
/* return if the resource control is already in OFF state */ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
DRMID(drm_enc), sw_event);
mutex_unlock(&dpu_enc->rc_lock); return 0;
} elseif (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
DRMID(drm_enc), sw_event, dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock); return -EINVAL;
}
/** * expect to arrive here only if in either idle state or pre-off * and in IDLE state the resources are already disabled
*/ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
_dpu_encoder_resource_disable(drm_enc);
/* * if we are in ON but a frame was just kicked off, * ignore the IDLE event, it's probably a stale timer event
*/ if (dpu_enc->frame_busy_mask[0]) {
DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
DRMID(drm_enc), sw_event, dpu_enc->rc_state);
mutex_unlock(&dpu_enc->rc_lock); return 0;
}
if (is_vid_mode)
_dpu_encoder_irq_disable(drm_enc); else
_dpu_encoder_resource_disable(drm_enc);
for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
phys->hw_pp = dpu_enc->hw_pp[i]; if (!phys->hw_pp) {
DPU_ERROR_ENC(dpu_enc, "no pp block assigned at idx: %d\n", i); return;
}
/* Use first (and only) CTL if active CTLs are supported */ if (num_ctl == 1)
phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[0]); else
phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL; if (!phys->hw_ctl) {
DPU_ERROR_ENC(dpu_enc, "no ctl block assigned at idx: %d\n", i); return;
}
phys->cached_mode = crtc_state->adjusted_mode; if (phys->ops.atomic_mode_set)
phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
}
}
if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave); if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc); if (crtc)
old_state = drm_atomic_get_old_crtc_state(state, crtc);
/* * The encoder is already disabled if self refresh mode was set earlier, * in the old_state for the corresponding crtc.
*/ if (old_state && old_state->self_refresh_active) return;
for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->ops.disable)
phys->ops.disable(phys);
}
/* after phys waits for frame-done, should be no more frames pending */ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
timer_delete_sync(&dpu_enc->frame_done_timer);
}
staticstruct dpu_hw_intf *dpu_encoder_get_intf(conststruct dpu_mdss_cfg *catalog, struct dpu_rm *dpu_rm, enum dpu_intf_type type, u32 controller_id)
{ int i = 0;
if (type == INTF_WB) return NULL;
for (i = 0; i < catalog->intf_count; i++) { if (catalog->intf[i].type == type
&& catalog->intf[i].controller_id == controller_id) { return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
}
}
return NULL;
}
/** * dpu_encoder_vblank_callback - Notify virtual encoder of vblank IRQ reception * @drm_enc: Pointer to drm encoder structure * @phy_enc: Pointer to physical encoder * Note: This is called from IRQ handler context.
*/ void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc, struct dpu_encoder_phys *phy_enc)
{ struct dpu_encoder_virt *dpu_enc = NULL; unsignedlong lock_flags;
/**
 * dpu_encoder_assign_crtc - Link the encoder to the crtc it's assigned to
 * @drm_enc: encoder pointer
 * @crtc: crtc pointer
 */
void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	unsigned long flags;

	spin_lock_irqsave(&dpu_enc->enc_spinlock, flags);

	/* crtc should always be cleared before re-assigning */
	WARN_ON(crtc && dpu_enc->crtc);
	dpu_enc->crtc = crtc;

	spin_unlock_irqrestore(&dpu_enc->enc_spinlock, flags);
}
/** * dpu_encoder_toggle_vblank_for_crtc - Toggles vblank interrupts on or off if * the encoder is assigned to the given crtc * @drm_enc: encoder pointer * @crtc: crtc pointer * @enable: true if vblank should be enabled
*/ void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc, bool enable)
{ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc); unsignedlong lock_flags; int i;
/* One of the physical encoders has become idle */ for (i = 0; i < dpu_enc->num_phys_encs; i++) { if (dpu_enc->phys_encs[i] == ready_phys) {
trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
dpu_enc->frame_busy_mask[0]);
clear_bit(i, dpu_enc->frame_busy_mask);
}
}
if (!dpu_enc->frame_busy_mask[0]) {
atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
timer_delete(&dpu_enc->frame_done_timer);
/* Return early if encoder is writeback and in clone mode */ if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL &&
dpu_enc->cwb_mask) {
DPU_DEBUG("encoder %d skip flush for concurrent writeback encoder\n",
DRMID(drm_enc)); return;
}
if (extra_flush_bits && ctl->ops.update_pending_flush)
ctl->ops.update_pending_flush(ctl, extra_flush_bits);
ctl->ops.trigger_flush(ctl);
if (ctl->ops.get_pending_flush)
ret = ctl->ops.get_pending_flush(ctl);
if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
phys->ops.trigger_start(phys);
}
/** * dpu_encoder_helper_trigger_start - control start helper function * This helper function may be optionally specified by physical * encoders if they require ctl_start triggering. * @phys_enc: Pointer to physical encoder structure
*/ void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
{ struct dpu_hw_ctl *ctl;
do {
rc = wait_event_timeout(*(info->wq),
atomic_read(info->atomic_cnt) == 0, jiffies);
time = ktime_to_ms(ktime_get());
trace_dpu_enc_wait_event_timeout(drm_id,
DPU_IRQ_REG(irq_idx), DPU_IRQ_BIT(irq_idx),
rc, time,
expected_time,
atomic_read(info->atomic_cnt)); /* If we timed out, counter is valid and time is less, wait again */
} while (atomic_read(info->atomic_cnt) && (rc == 0) &&
(time < expected_time));
/** * _dpu_encoder_kickoff_phys - handle physical encoder kickoff * Iterate through the physical encoders and perform consolidated flush * and/or control start triggering as needed. This is done in the virtual * encoder rather than the individual physical ones in order to handle * use cases that require visibility into multiple physical encoders at * a time. * @dpu_enc: Pointer to virtual encoder structure
*/ staticvoid _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
{ struct dpu_hw_ctl *ctl;
uint32_t i, pending_flush; unsignedlong lock_flags;
/* don't perform flush/start operations for slave encoders */ for (i = 0; i < dpu_enc->num_phys_encs; i++) { struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
if (phys->enable_state == DPU_ENC_DISABLED) continue;
ctl = phys->hw_ctl;
/* * This is cleared in frame_done worker, which isn't invoked * for async commits. So don't set this for async, since it'll * roll over to the next commit.
*/ if (phys->split_role != ENC_ROLE_SLAVE)
set_bit(i, dpu_enc->frame_busy_mask);
/* * For linetime calculation, only operate on master encoder.
*/ if (!dpu_enc->cur_master) return 0;
if (!dpu_enc->cur_master->ops.get_line_count) {
DPU_ERROR("get_line_count function not defined\n"); return 0;
}
pclk_rate = mode->clock; /* pixel clock in kHz */ if (pclk_rate == 0) {
DPU_ERROR("pclk is 0, cannot calculate line time\n"); return 0;
}
pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate); if (pclk_period == 0) {
DPU_ERROR("pclk period is 0\n"); return 0;
}
/* * Line time calculation based on Pixel clock and HTOTAL. * Final unit is in ns.
*/
line_time = (pclk_period * mode->htotal) / 1000; if (line_time == 0) {
DPU_ERROR("line time calculation is 0\n"); return 0;
}
/** * dpu_encoder_vsync_time - get the time of the next vsync * @drm_enc: encoder pointer * @wakeup_time: pointer to ktime_t to write the vsync time to
*/ int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
{ struct drm_display_mode *mode; struct dpu_encoder_virt *dpu_enc;
u32 cur_line;
u32 line_time;
u32 vtotal, time_to_vsync;
ktime_t cur_time;
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (!drm_enc->crtc || !drm_enc->crtc->state) {
DPU_ERROR("crtc/crtc state object is NULL\n"); return -EINVAL;
}
mode = &drm_enc->crtc->state->adjusted_mode;
line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode); if (!line_time) return -EINVAL;
/* * minimum number of initial line pixels is a sum of: * 1. sub-stream multiplexer delay (83 groups for 8bpc, * 91 for 10 bpc) * 3 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3 * 3. the initial xmit delay * 4. total pipeline delay through the "lock step" of encoder (47) * 5. 6 additional pixels as the output of the rate buffer is * 48 bits wide
*/
ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47; if (soft_slice_per_enc > 1)
total_pixels += (ssm_delay * 3); return DIV_ROUND_UP(total_pixels, dsc->slice_width);
}
for (i = 0; i < num_dsc; i++)
dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
dsc, dsc_common_mode, initial_lines);
}
/**
 * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
 *	path (i.e. ctl flush and start) at next appropriate time.
 *	Immediately: if no previous commit is outstanding.
 *	Delayed: Block until next trigger can be issued.
 * @drm_enc: encoder pointer
 */
void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	bool needs_hw_reset = false;
	unsigned int i;

	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));

	/* prepare for next kickoff, may include waiting on previous kickoff */
	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		if (phys->ops.prepare_for_kickoff)
			phys->ops.prepare_for_kickoff(phys);
		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
			needs_hw_reset = true;
	}
	DPU_ATRACE_END("enc_prepare_for_kickoff");

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
		for (i = 0; i < dpu_enc->num_phys_encs; i++)
			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
	}

	if (dpu_enc->dsc)
		dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
}
/** * dpu_encoder_is_valid_for_commit - check if encode has valid parameters for commit. * @drm_enc: Pointer to drm encoder structure
*/ bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
{ struct dpu_encoder_virt *dpu_enc; unsignedint i; struct dpu_encoder_phys *phys;
dpu_enc = to_dpu_encoder_virt(drm_enc);
if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) { for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i]; if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
DPU_DEBUG("invalid FB not kicking off\n"); returnfalse;
}
}
}
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc);
/* allow phys encs to handle any post-kickoff business */ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
phys = dpu_enc->phys_encs[i]; if (phys->ops.handle_post_kickoff)
phys->ops.handle_post_kickoff(phys);
}
for (i = 0; i < num_lm; i++) {
hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]); if (ctl->ops.update_pending_flush_mixer)
ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
/* clear all blendstages */ if (ctl->ops.setup_blendstage)
ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
if (hw_mixer[i]->ops.clear_all_blendstages)
hw_mixer[i]->ops.clear_all_blendstages(hw_mixer[i]);
if (ctl->ops.set_active_lms)
ctl->ops.set_active_lms(ctl, NULL);
if (ctl->ops.set_active_fetch_pipes)
ctl->ops.set_active_fetch_pipes(ctl, NULL);
if (ctl->ops.set_active_pipes)
ctl->ops.set_active_pipes(ctl, NULL);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.