val = qos->prio_level;
mask = M_BKE_HEALTH_CFG_PRIOLVL_MASK;
val |= qos->areq_prio << M_BKE_HEALTH_CFG_AREQPRIO_SHIFT;
mask |= M_BKE_HEALTH_CFG_AREQPRIO_MASK;
/* LIMITCMDS is not present on M_BKE_HEALTH_3 */ if (regnum != 3) {
val |= qos->limit_commands << M_BKE_HEALTH_CFG_LIMITCMDS_SHIFT;
mask |= M_BKE_HEALTH_CFG_LIMITCMDS_MASK;
}
if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
mode = qn->qos.qos_mode;
/* QoS Priority: The QoS Health parameters are getting considered * only if we are NOT in Bypass Mode.
*/ if (mode != NOC_QOS_MODE_BYPASS) { for (i = 3; i >= 0; i--) {
rc = qcom_icc_bimc_set_qos_health(qp,
&qn->qos, i); if (rc) return rc;
}
/* Set BKE_EN to 1 when Fixed, Regulator or Limiter Mode */
val = 1;
}
/* Must be updated one at a time, P1 first, P0 last */
val = qos->areq_prio << NOC_QOS_PRIORITY_P1_SHIFT;
rc = regmap_update_bits(qp->regmap,
qp->qos_offset + NOC_QOS_PRIORITYn_ADDR(qos->qos_port),
NOC_QOS_PRIORITY_P1_MASK, val); if (rc) return rc;
if (qn->mas_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(rpm_ctx,
RPM_BUS_MASTER_REQ,
qn->mas_rpm_id,
bw_bps); if (ret) {
pr_err("qcom_icc_rpm_smd_send mas %d error %d\n",
qn->mas_rpm_id, ret); return ret;
}
}
if (qn->slv_rpm_id != -1) {
ret = qcom_icc_rpm_smd_send(rpm_ctx,
RPM_BUS_SLAVE_REQ,
qn->slv_rpm_id,
bw_bps); if (ret) {
pr_err("qcom_icc_rpm_smd_send slv %d error %d\n",
qn->slv_rpm_id, ret); return ret;
}
}
}
return 0;
}
/**
 * qcom_icc_pre_bw_aggregate - cleans up values before re-aggregate requests
 * @node: icc node to operate on
 *
 * Zeroes the per-bucket aggregated bandwidth sums and peaks stored on the
 * node's driver data, so a fresh aggregation pass can accumulate from zero.
 */
static void qcom_icc_pre_bw_aggregate(struct icc_node *node)
{
	struct qcom_icc_node *qn;
	size_t i;

	qn = node->data;
	/* One entry per RPM state bucket (e.g. active/sleep). */
	for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) {
		qn->sum_avg[i] = 0;
		qn->max_peak[i] = 0;
	}
}
/** * qcom_icc_bw_aggregate - aggregate bw for buckets indicated by tag * @node: node to aggregate * @tag: tag to indicate which buckets to aggregate * @avg_bw: new bw to sum aggregate * @peak_bw: new bw to max aggregate * @agg_avg: existing aggregate avg bw val * @agg_peak: existing aggregate peak bw val
*/ staticint qcom_icc_bw_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
{
size_t i; struct qcom_icc_node *qn;
qn = node->data;
if (!tag)
tag = RPM_ALWAYS_TAG;
for (i = 0; i < QCOM_SMD_RPM_STATE_NUM; i++) { if (tag & BIT(i)) {
qn->sum_avg[i] += avg_bw;
qn->max_peak[i] = max_t(u32, qn->max_peak[i], peak_bw);
}
}
ret = qcom_icc_rpm_set(src_qn, src_qn->sum_avg); if (ret) return ret;
if (dst_qn) {
ret = qcom_icc_rpm_set(dst_qn, dst_qn->sum_avg); if (ret) return ret;
}
/* Some providers don't have a bus clock to scale */ if (!qp->bus_clk_desc && !qp->bus_clk) return 0;
/* * Downstream checks whether the requested rate is zero, but it makes little sense * to vote for a value that's below the lower threshold, so let's not do so.
*/ if (qp->keep_alive)
active_rate = max(ICC_BUS_CLK_MIN_RATE, active_rate);
/* Some providers have a non-RPM-owned bus clock - convert kHz->Hz for the CCF */ if (qp->bus_clk) {
active_rate = max_t(u64, active_rate, sleep_rate); /* ARM32 caps clk_set_rate arg to u32.. Nothing we can do about that! */
active_rate = min_t(u64, 1000ULL * active_rate, ULONG_MAX); return clk_set_rate(qp->bus_clk, active_rate);
}
if (active_rate != qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
active_rate); if (ret) return ret;
/* Cache the rate after we've successfully commited it to RPM */
qp->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
}
if (sleep_rate != qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
ret = qcom_icc_rpm_set_bus_rate(qp->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
sleep_rate); if (ret) return ret;
/* Cache the rate after we've successfully commited it to RPM */
qp->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
}
/* Handle the node-specific clock */ if (!src_qn->bus_clk_desc) return 0;
if (active_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE]) {
ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_ACTIVE_STATE,
active_rate); if (ret) return ret;
/* Cache the rate after we've successfully committed it to RPM */
src_qn->bus_clk_rate[QCOM_SMD_RPM_ACTIVE_STATE] = active_rate;
}
if (sleep_rate != src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE]) {
ret = qcom_icc_rpm_set_bus_rate(src_qn->bus_clk_desc, QCOM_SMD_RPM_SLEEP_STATE,
sleep_rate); if (ret) return ret;
/* Cache the rate after we've successfully committed it to RPM */
src_qn->bus_clk_rate[QCOM_SMD_RPM_SLEEP_STATE] = sleep_rate;
}
if (desc->num_intf_clocks) {
cds = desc->intf_clocks;
cd_num = desc->num_intf_clocks;
} else { /* 0 intf clocks is perfectly fine */
cd_num = 0;
}
qp = devm_kzalloc(dev, sizeof(*qp), GFP_KERNEL); if (!qp) return -ENOMEM;
qp->intf_clks = devm_kcalloc(dev, cd_num, sizeof(*qp->intf_clks), GFP_KERNEL); if (!qp->intf_clks) return -ENOMEM;
if (desc->bus_clk_desc) {
qp->bus_clk_desc = devm_kzalloc(dev, sizeof(*qp->bus_clk_desc),
GFP_KERNEL); if (!qp->bus_clk_desc) return -ENOMEM;
qp->bus_clk_desc = desc->bus_clk_desc;
} else { /* Some older SoCs may have a single non-RPM-owned bus clock. */
qp->bus_clk = devm_clk_get_optional(dev, "bus"); if (IS_ERR(qp->bus_clk)) return PTR_ERR(qp->bus_clk);
}
data = devm_kzalloc(dev, struct_size(data, nodes, num_nodes),
GFP_KERNEL); if (!data) return -ENOMEM;
data->num_nodes = num_nodes;
qp->num_intf_clks = cd_num; for (i = 0; i < cd_num; i++)
qp->intf_clks[i].id = cds[i];
/* If this fails, bus accesses will crash the platform! */
ret = clk_bulk_prepare_enable(qp->num_intf_clks, qp->intf_clks); if (ret) goto err_disable_unprepare_clk;
for (i = 0; i < num_nodes; i++) {
size_t j;
if (!qnodes[i]->ab_coeff)
qnodes[i]->ab_coeff = qp->ab_coeff;
if (!qnodes[i]->ib_coeff)
qnodes[i]->ib_coeff = qp->ib_coeff;
node = icc_node_create(qnodes[i]->id); if (IS_ERR(node)) {
clk_bulk_disable_unprepare(qp->num_intf_clks,
qp->intf_clks);
ret = PTR_ERR(node); goto err_remove_nodes;
}
for (j = 0; j < qnodes[i]->num_links; j++)
icc_link_create(node, qnodes[i]->links[j]);
/* Set QoS registers (we only need to do it once, generally) */ if (qnodes[i]->qos.ap_owned &&
qnodes[i]->qos.qos_mode != NOC_QOS_MODE_INVALID) {
ret = qcom_icc_qos_set(node); if (ret) {
clk_bulk_disable_unprepare(qp->num_intf_clks,
qp->intf_clks); goto err_remove_nodes;
}
}
ret = icc_provider_register(provider); if (ret) goto err_remove_nodes;
platform_set_drvdata(pdev, qp);
/* Populate child NoC devices if any */ if (of_get_child_count(dev->of_node) > 0) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev); if (ret) goto err_deregister_provider;
}
/*
 * NOTE(review): the following text is a website disclaimer that leaked into
 * this file during extraction; it is not part of the driver source.
 * English translation of the original German:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, correctness, nor quality
 * of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */