/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all display interfaces.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths.
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 */
switch (intf->type) { case INTF_WB: returntrue; case INTF_DSI: return intf->mode == MDP5_INTF_DSI_MODE_COMMAND; default: returnfalse;
}
}
/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to be
 * executed in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
*/ staticvoid send_start_signal(struct mdp5_ctl *ctl)
{ unsignedlong flags;
/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @ctl: the CTL instance
 * @pipeline: the encoder's INTF + MIXER configuration
 * @enabled: true, when encoder is ready for data streaming; false, otherwise.
 *
 * Note:
 * This encoder state is needed to trigger START signal (data path kickoff).
*/ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, bool enabled)
{ struct mdp5_interface *intf = pipeline->intf;
switch (pipe) { case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3; case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3; case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3; case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3; case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3; case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3; case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3; case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3; case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3; case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3; case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage); case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage); default: return 0;
}
}
/* for some targets, cursor bit is the same as LM bit */ if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
/**
 * mdp5_ctl_commit() - Register Flush
 *
 * @ctl: the CTL instance
 * @pipeline: the encoder's INTF + MIXER configuration
 * @flush_mask: bitmask of display controller hw blocks to flush
 * @start: if true, immediately update flush registers and set START
 *         bit, otherwise accumulate flush_mask bits until we are
 *         ready to START
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
u32 flush_mask, bool start)
{ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; unsignedlong flags;
u32 flush_id = ctl->id;
u32 curr_ctl_flush_mask;
/*
 * mdp5_ctl_request() - CTL allocation
 *
 * Try to return a booked CTL when @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * @return fail if no CTL is available.
*/ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr, int intf_num)
{ struct mdp5_ctl *ctl = NULL; const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0; unsignedlong flags; int c;
spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
/* search the preferred */ for (c = 0; c < ctl_mgr->nctl; c++) if ((ctl_mgr->ctls[c].status & checkm) == match) goto found;
dev_warn(ctl_mgr->dev->dev, "fall back to the other CTL category for INTF %d!\n", intf_num);
match ^= CTL_STAT_BOOKED; for (c = 0; c < ctl_mgr->nctl; c++) if ((ctl_mgr->ctls[c].status & checkm) == match) goto found;
DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!"); goto unlock;
ctl_mgr = devm_kzalloc(dev->dev, sizeof(*ctl_mgr), GFP_KERNEL); if (!ctl_mgr) {
DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n"); return ERR_PTR(-ENOMEM);
}
if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
ctl_cfg->count); return ERR_PTR(-ENOSPC);
}
/* initialize each CTL of the pool: */
spin_lock_irqsave(&ctl_mgr->pool_lock, flags); for (c = 0; c < ctl_mgr->nctl; c++) { struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
if (WARN_ON(!ctl_cfg->base[c])) {
DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
ret = -EINVAL;
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags); return ERR_PTR(ret);
}
ctl->ctlm = ctl_mgr;
ctl->id = c;
ctl->reg_offset = ctl_cfg->base[c];
ctl->status = 0;
spin_lock_init(&ctl->hw_lock);
}
/*
 * In the bonded DSI case, CTL0 and CTL1 are always assigned to the two DSI
 * interfaces to support the single FLUSH feature (flush CTL0 and CTL1 when
 * only writing into CTL0's FLUSH register), keeping the two DSI pipes in
 * sync. Single FLUSH is supported from hw rev v3.0.
*/ for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++) if (hw_cfg->intf.connect[c] == INTF_DSI)
dsi_cnt++; if ((rev >= 3) && (dsi_cnt > 1)) {
ctl_mgr->single_flush_supported = true; /* Reserve CTL0/1 for INTF1/2 */
ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
}
spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
The information on this web page has been compiled carefully and to the best
of our knowledge. However, neither the completeness, nor the correctness,
nor the quality of the information provided is guaranteed.
Note:
The colored syntax highlighting and the measurement are still experimental.