/* * Global list of private DSI PLL struct pointers. We need this for bonded DSI * mode, where the master PLL's clk_ops needs access the slave's private data
*/ staticstruct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
/*
 * NOTE(review): fragment — the enclosing function's signature and the
 * computation of analog_controls_five_1/vco_config_1 are above this chunk.
 * These writes program fixed tuning values into the 7nm DSI PLL analog and
 * loop-filter registers.
 */
writel(analog_controls_five_1, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1);
writel(vco_config_1, base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1);
writel(0x01, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE);
writel(0x03, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO);
writel(0x00, base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE);
writel(0x00, base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER);
writel(0x4e, base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER);
writel(0x40, base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS);
writel(0xba, base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE);
writel(0x0c, base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE);
writel(0x00, base + REG_DSI_7nm_PHY_PLL_OUTDIV);
writel(0x00, base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE);
writel(0x08, base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO);
writel(0x0a, base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1);
writel(0xc0, base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1);
/*
 * NOTE(review): PLL_INT_GAIN_IFILT_BAND_1 is written twice in a row
 * (0x84, then 0x82).  Presumably one of the two writes was originally
 * gated by a quirk check that was lost in formatting — verify against
 * the upstream driver before relying on this sequence.
 */
writel(0x84, base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1);
writel(0x82, base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1);
writel(0x4c, base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1);
writel(0x80, base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE);
writel(0x29, base + REG_DSI_7nm_PHY_PLL_PFILT);
writel(0x2f, base + REG_DSI_7nm_PHY_PLL_PFILT);
writel(0x2a, base + REG_DSI_7nm_PHY_PLL_IFILT);
/* IFILT value differs between pre-v4.1 and v4.1+ PHY revisions */
writel(!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) ? 0x3f : 0x22,
base + REG_DSI_7nm_PHY_PLL_IFILT);
/* v4.1+ only: performance-optimize register, mirrored to the slave PLL */
if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1)) {
writel(0x22, base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE); if (pll->slave)
writel(0x22, pll->slave->phy->pll_base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE);
}
}
/*
 * Reset the PHY digital domain. This would be needed when
 * coming out of a CX or analog rail power collapse while
 * ensuring that the pads maintain LP00 or LP11 state
 */
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
{
	/* Pulse the digital-top reset bit: assert, then de-assert */
	writel(BIT(0), pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4);
	wmb(); /* Ensure that the reset is asserted */
	writel(0, pll->phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4);
	wmb(); /* Ensure that the reset is deasserted */
}
/*
 * NOTE(review): fragment of the PLL enable path — the function header, the
 * PLL start write, and the trailing error label/return are outside this
 * chunk.
 */
/* * ensure all PLL configurations are written prior to checking * for PLL lock.
*/
wmb();
/* Check for PLL lock */
rc = dsi_pll_7nm_lock_status(pll_7nm); if (rc) {
pr_err("PLL(%d) lock failed\n", pll_7nm->phy->id); goto error;
}
pll_7nm->phy->pll_on = true;
/* * assert power on reset for PHY digital in case the PLL is * enabled after CX of analog domain power collapse. This needs * to be done before enabling the global clk.
*/
dsi_pll_phy_dig_reset(pll_7nm); if (pll_7nm->slave)
dsi_pll_phy_dig_reset(pll_7nm->slave);
/* Enable the global clock on master and, in bonded mode, the slave */
dsi_pll_enable_global_clk(pll_7nm); if (pll_7nm->slave)
dsi_pll_enable_global_clk(pll_7nm->slave);
/* Turn the resync FIFO back on (RBUF_CTRL), mirrored to the slave */
writel(0x1, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL); if (pll_7nm->slave)
writel(0x1, pll_7nm->slave->phy->base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
/*
 * NOTE(review): fragment of the PLL disable path — the function header is
 * outside this chunk.
 */
/* * To avoid any stray glitches while abruptly powering down the PLL * make sure to gate the clock using the clock enable bit before * powering down the PLL
*/
dsi_pll_disable_global_clk(pll_7nm);
writel(0, pll_7nm->phy->base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
dsi_pll_disable_sub(pll_7nm); if (pll_7nm->slave) {
dsi_pll_disable_global_clk(pll_7nm->slave);
dsi_pll_disable_sub(pll_7nm->slave);
} /* flush, ensure all register writes are done */
wmb();
pll_7nm->phy->pll_on = false;
}
/*
 * NOTE(review): fragment — presumably part of the PLL state-restore path.
 * Re-apply the cached post-divider into the low 2 bits of PLL_OUTDIV_RATE
 * without disturbing the other bits.
 */
val = readl(pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
val &= ~0x3;
val |= cached->pll_out_div;
writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
/*
 * NOTE(review): fragment of the usecase-selection function — the header and
 * the initialisation of `data` (bit clock source select, presumably 0 =
 * internal PLL — TODO confirm) are outside this chunk.
 */
switch (phy->usecase) { case MSM_DSI_PHY_STANDALONE: break; case MSM_DSI_PHY_MASTER:
/* Master drives the next PHY's PLL in bonded DSI mode */
pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX]; /* v7.0: Enable ATB_EN0 and alternate clock output to external phy */ if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)
writel(0x07, base + REG_DSI_7nm_PHY_CMN_CTRL_5); break; case MSM_DSI_PHY_SLAVE:
data = 0x1; /* external PLL */ break; default: return -EINVAL;
}
/* set PLL src */
dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK,
DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data));
return 0;
}
/* * The post dividers and mux clocks are created using the standard divider and * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux * state to follow the master PLL's divider/mux state. Therefore, we don't * require special clock ops that also configure the slave PLL registers
 */ staticint pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provided_clocks)
{ char clk_name[32]; struct clk_init_data vco_init = {
.parent_data = &(conststruct clk_parent_data) {
.fw_name = "ref",
},
.num_parents = 1,
.name = clk_name,
.flags = CLK_IGNORE_UNUSED,
.ops = &clk_ops_dsi_pll_7nm_vco,
}; struct device *dev = &pll_7nm->phy->pdev->dev; struct clk_hw *hw, *pll_out_div, *pll_bit, *pll_by_2_bit; struct clk_hw *pll_post_out_div, *phy_pll_out_dsi_parent; int ret;
/*
 * NOTE(review): this function body appears truncated — `clk_name` is used
 * below without any visible snprintf, and the registration of the VCO,
 * pll_out_div, pll_bit and pll_by_2_bit clocks is missing.  Also note the
 * fused tokens above ("staticint", "conststruct") which will not compile.
 * Reconcile with the upstream driver before building.
 */
if (pll_7nm->phy->cphy_mode)
/* CPHY: post divider is pll_out_div / 3.5 (ratio 2/7) */
pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
dev, clk_name, pll_out_div, 0, 2, 7); else
/* DPHY: post divider is pll_out_div / 4 */
pll_post_out_div = devm_clk_hw_register_fixed_factor_parent_hw(
dev, clk_name, pll_out_div, 0, 1, 4); if (IS_ERR(pll_post_out_div)) {
ret = PTR_ERR(pll_post_out_div); goto fail;
}
/* in CPHY mode, pclk_mux will always have post_out_div as parent * don't register a pclk_mux clock and just use post_out_div instead
*/ if (pll_7nm->phy->cphy_mode) {
dsi_pll_cmn_clk_cfg1_update(pll_7nm,
DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL__MASK,
DSI_7nm_PHY_CMN_CLK_CFG1_DSICLK_SEL(3));
phy_pll_out_dsi_parent = pll_post_out_div;
} else {
snprintf(clk_name, sizeof(clk_name), "dsi%d_pclk_mux", pll_7nm->phy->id);
/*
 * NOTE(review): fragment of the PLL init path — the function header is
 * outside this chunk.  Registers the clock tree, then seeds saved state so
 * a later restore has a sane VCO rate (display-handover workaround).
 */
ret = pll_7nm_register(pll_7nm, phy->provided_clocks->hws); if (ret) {
DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret); return ret;
}
phy->vco_hw = &pll_7nm->clk_hw;
/* TODO: Remove this when we have proper display handover support */
msm_dsi_phy_pll_save_state(phy); /* * Store also proper vco_current_rate, because its value will be used in * dsi_7nm_pll_restore_state().
*/ if (!dsi_pll_7nm_vco_recalc_rate(&pll_7nm->clk_hw, VCO_REF_CLK_RATE))
pll_7nm->vco_current_rate = pll_7nm->phy->cfg->min_pll_rate;
/*
 * NOTE(review): fragment — tail of a helper that reports whether the PLL is
 * running by reading bit 0 of CMN_PLL_CNTRL.  The header is outside this
 * chunk.
 */
data = readl(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
mb(); /* make sure read happened */
return (data & BIT(0));
}
/*
 * Enable or disable the LPRX/CDRX receivers on the physical lane that
 * carries logical data lane 0.
 *
 * @phy:    the DSI PHY whose lane registers are programmed
 * @enable: true to enable LPRX and CDRX, false to disable both
 */
static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
{
	void __iomem *lane_base = phy->lane_base;
	int phy_lane_0 = 0;	/* TODO: Support all lane swap configs */

	/*
	 * LPRX and CDRX need to enabled only for physical data lane
	 * corresponding to the logical data lane 0
	 */
	if (enable)
		writel(0x3, lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0));
	else
		writel(0, lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0));
}
/*
 * NOTE(review): fragment of the per-lane settings function — the header and
 * the definitions of tx_dctrl/tx_dctrl_1/lane_base are outside this chunk.
 */
/* v4.1+ uses the alternate TX DCTRL table */
if (!(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1))
tx_dctrl = tx_dctrl_1;
/* Strength ctrl settings */ for (i = 0; i < 5; i++) { /* * Disable LPRX and CDRX for all lanes. And later on, it will * be only enabled for the physical data lane corresponding * to the logical data lane 0
*/
writel(0, lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i));
writel(0x0, lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i));
}
/* Re-enable LPRX/CDRX only on the lane backing logical lane 0 */
dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
/* other settings */ for (i = 0; i < 5; i++) {
writel(0x0, lane_base + REG_DSI_7nm_PHY_LN_CFG0(i));
writel(0x0, lane_base + REG_DSI_7nm_PHY_LN_CFG1(i));
/* lane 4 (clock lane) gets a different CFG2 value */
writel(i == 4 ? 0x8a : 0xa, lane_base + REG_DSI_7nm_PHY_LN_CFG2(i));
writel(tx_dctrl[i], lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i));
}
}
/*
 * NOTE(review): fragment — interior of the PHY enable sequence.  The
 * function header (and the derivation of vreg_ctrl_*, glbl_* and timing
 * values) is outside this chunk, as is the trailing return.
 */
/* de-assert digital and pll power down */
data = BIT(6) | BIT(5);
writel(data, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
/* Assert PLL core reset */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
/* turn off resync FIFO */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL);
/* program CMN_CTRL_4 for minor_ver 2 chipsets*/ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) ||
(readl(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
writel(0x04, base + REG_DSI_7nm_PHY_CMN_CTRL_4);
/* Configure PHY lane swap (TODO: we need to calculate this) */
writel(0x21, base + REG_DSI_7nm_PHY_CMN_LANE_CFG0);
writel(0x84, base + REG_DSI_7nm_PHY_CMN_LANE_CFG1);
/* CPHY mode sets an extra bit in the global control register */
if (phy->cphy_mode)
writel(BIT(6), base + REG_DSI_7nm_PHY_CMN_GLBL_CTRL);
/* Enable LDO */
writel(vreg_ctrl_0, base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0);
writel(vreg_ctrl_1, base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_CTRL_3);
/* Global strength / calibration / pre-emphasis / rescode settings */
writel(glbl_str_swi_cal_sel_ctrl,
base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL);
writel(glbl_hstx_str_ctrl_0,
base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0);
writel(glbl_pemph_ctrl_0,
base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0); if (phy->cphy_mode)
writel(0x01, base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1);
writel(glbl_rescode_top_ctrl,
base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL);
writel(glbl_rescode_bot_ctrl,
base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL);
writel(0x55, base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL);
/* Remove power down from all blocks */
writel(0x7f, base + REG_DSI_7nm_PHY_CMN_CTRL_0);
writel(lane_ctrl0, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0);
/* Select full-rate mode */ if (!phy->cphy_mode)
writel(0x40, base + REG_DSI_7nm_PHY_CMN_CTRL_2);
ret = dsi_7nm_set_usecase(phy); if (ret) {
DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
__func__, ret); return ret;
}
/* DSI PHY timings */ if (phy->cphy_mode) {
/* CPHY timing set: fewer programmable fields than DPHY */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0);
writel(timing->hs_exit, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4);
writel(timing->shared_timings.clk_pre,
base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5);
writel(timing->clk_prepare, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6);
writel(timing->shared_timings.clk_post,
base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7);
writel(timing->hs_rqst, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8);
writel(0x02, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9);
writel(0x04, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11);
} else {
/* DPHY timing set */
writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0);
writel(timing->clk_zero, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1);
writel(timing->clk_prepare, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2);
writel(timing->clk_trail, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3);
writel(timing->hs_exit, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4);
writel(timing->hs_zero, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5);
writel(timing->hs_prepare, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6);
writel(timing->hs_trail, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7);
writel(timing->hs_rqst, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8);
writel(0x02, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9);
writel(0x04, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10);
writel(0x00, base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11);
writel(timing->shared_timings.clk_pre,
base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12);
writel(timing->shared_timings.clk_post,
base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13);
}
/* DSI lane settings */
dsi_phy_hw_v4_0_lane_settings(phy);
/*
 * NOTE(review): fragment — presumably the body of a continuous-clock
 * toggle helper (header and closing brace outside this chunk).  Bits 5 and
 * 6 of CMN_LANE_CTRL1 are set or cleared together via read-modify-write.
 */
data = readl(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1); if (enable)
data |= BIT(5) | BIT(6); else
data &= ~(BIT(5) | BIT(6));
writel(data, base + REG_DSI_7nm_PHY_CMN_LANE_CTRL1);
/*
 * NOTE(review): the following German website-disclaimer text was
 * accidentally appended to this C source file and is not part of the
 * driver; it is preserved here as a comment so the file stays compilable.
 * Translation: "The information on this web page was carefully compiled to
 * the best of our knowledge.  However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed.  Note: the
 * colored syntax display and the measurement are still experimental."
 * This block should be removed from the source file.
 */