/* * We can't figure out what rate it will be, so just return the * rate back to the caller. scmi_clk_recalc_rate() will be called * after the rate is set and we'll know what rate the clock is * running at then.
*/ if (clk->info->rate_discrete) return rate;
ret = scmi_proto_clk_ops->parent_get(clk->ph, clk->id, &parent_id); if (ret) return 0;
for (p_idx = 0; p_idx < clk->info->num_parents; p_idx++) { if (clk->parent_data[p_idx].index == parent_id) break;
}
if (p_idx == clk->info->num_parents) return 0;
return p_idx;
}
/*
 * scmi_clk_determine_rate() - Validate a clock rate request
 * @hw: The clk_hw being configured
 * @req: The rate request to validate
 *
 * Accept any requested rate as-is and leave the actual rate
 * constraining/rounding to the SCMI platform firmware, which owns the
 * clock and will apply its own limits when the rate is eventually set.
 *
 * Return: 0 (the request is always considered satisfiable).
 */
static int scmi_clk_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	/*
	 * Assume all the requested rates are supported, and let the
	 * firmware handle the remaining work.
	 */
	return 0;
}
ret = scmi_proto_clk_ops->state_get(clk->ph, clk->id, &enabled, atomic); if (ret)
dev_warn(clk->dev, "Failed to get state for clock ID %d\n", clk->id);
/* SCMI OEM Duty Cycle is expressed as a percentage */
val = (duty->num * 100) / duty->den;
ret = scmi_proto_clk_ops->config_oem_set(clk->ph, clk->id,
SCMI_CLOCK_CFG_DUTY_CYCLE,
val, false); if (ret)
dev_warn(clk->dev, "Failed to set duty cycle(%u/%u) for clock ID %d\n",
duty->num, duty->den, clk->id);
/** * scmi_clk_ops_alloc() - Alloc and configure clock operations * @dev: A device reference for devres * @feats_key: A bitmap representing the desired clk_ops capabilities * * Allocate and configure a proper set of clock operations depending on the * specifically required SCMI clock features. * * Return: A pointer to the allocated and configured clk_ops on success, * or NULL on allocation failure.
*/ staticconststruct clk_ops *
scmi_clk_ops_alloc(struct device *dev, unsignedlong feats_key)
{ struct clk_ops *ops;
ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL); if (!ops) return NULL; /* * We can provide enable/disable/is_enabled atomic callbacks only if the * underlying SCMI transport for an SCMI instance is configured to * handle SCMI commands in an atomic manner. * * When no SCMI atomic transport support is available we instead provide * only the prepare/unprepare API, as allowed by the clock framework * when atomic calls are not available.
*/ if (feats_key & BIT(SCMI_CLK_STATE_CTRL_SUPPORTED)) { if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
ops->enable = scmi_clk_atomic_enable;
ops->disable = scmi_clk_atomic_disable;
} else {
ops->prepare = scmi_clk_enable;
ops->unprepare = scmi_clk_disable;
}
}
/** * scmi_clk_ops_select() - Select a proper set of clock operations * @sclk: A reference to an SCMI clock descriptor * @atomic_capable: A flag to indicate if atomic mode is supported by the * transport * @atomic_threshold_us: Platform atomic threshold value in microseconds: * clk_ops are atomic when clock enable latency is less * than this threshold * @clk_ops_db: A reference to the array used as a database to store all the * created clock operations combinations. * @db_size: Maximum number of entries held by @clk_ops_db * * After having built a bitmap descriptor to represent the set of features * needed by this SCMI clock, at first use it to lookup into the set of * previously allocated clk_ops to check if a suitable combination of clock * operations was already created; when no match is found allocate a brand new * set of clk_ops satisfying the required combination of features and save it * for future references. * * In this way only one set of clk_ops is ever created for each different * combination that is effectively needed by a driver instance. * * Return: A pointer to the allocated and configured clk_ops on success, or * NULL otherwise.
*/ staticconststruct clk_ops *
scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable, unsignedint atomic_threshold_us, conststruct clk_ops **clk_ops_db, size_t db_size)
{ int ret;
u32 val; conststruct scmi_clock_info *ci = sclk->info; unsignedint feats_key = 0; conststruct clk_ops *ops;
/* * Note that when transport is atomic but SCMI protocol did not * specify (or support) an enable_latency associated with a * clock, we default to use atomic operations mode.
*/ if (atomic_capable && ci->enable_latency <= atomic_threshold_us)
feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
if (!ci->state_ctrl_forbidden)
feats_key |= BIT(SCMI_CLK_STATE_CTRL_SUPPORTED);
if (!ci->rate_ctrl_forbidden)
feats_key |= BIT(SCMI_CLK_RATE_CTRL_SUPPORTED);
if (!ci->parent_ctrl_forbidden)
feats_key |= BIT(SCMI_CLK_PARENT_CTRL_SUPPORTED);
if (ci->extended_config) {
ret = scmi_proto_clk_ops->config_oem_get(sclk->ph, sclk->id,
SCMI_CLOCK_CFG_DUTY_CYCLE,
&val, NULL, false); if (!ret)
feats_key |= BIT(SCMI_CLK_DUTY_CYCLE_SUPPORTED);
}
sclk->info = scmi_proto_clk_ops->info_get(ph, idx); if (!sclk->info) {
dev_dbg(dev, "invalid clock info for idx %d\n", idx);
hws[idx] = NULL; continue;
}
sclk->id = idx;
sclk->ph = ph;
sclk->dev = dev;
/* * Note that the scmi_clk_ops_db is on the stack, not global, * because it cannot be shared between multiple probe-sequences * to avoid sharing the devm_ allocated clk_ops between multiple * SCMI clk driver instances.
*/
scmi_ops = scmi_clk_ops_select(sclk, transport_is_atomic,
atomic_threshold_us,
scmi_clk_ops_db,
ARRAY_SIZE(scmi_clk_ops_db)); if (!scmi_ops) return -ENOMEM;
/*
 * NOTE(review): extraneous website boilerplate accidentally pasted into this
 * source file; wrapped in a comment so the file remains compilable.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereitgestellten Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */