/** * __rpmh_write: Cache and send the RPMH request * * @dev: The device making the request * @state: Active/Sleep request type * @rpm_msg: The data that needs to be sent (cmds). * * Cache the RPMH request and send if the state is ACTIVE_ONLY. * SLEEP/WAKE_ONLY requests are not sent to the controller at * this time. Use rpmh_flush() to send them to the controller.
*/ staticint __rpmh_write(conststruct device *dev, enum rpmh_state state, struct rpmh_request *rpm_msg)
{ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); int ret = -EINVAL; struct cache_req *req; int i;
/* Cache the request in our store and link the payload */ for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]); if (IS_ERR(req)) return PTR_ERR(req);
}
if (state == RPMH_ACTIVE_ONLY_STATE) {
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
} else { /* Clean up our call by spoofing tx_done */
ret = 0;
rpmh_tx_done(&rpm_msg->msg);
}
/**
 * rpmh_write_async: Write a set of RPMH commands
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in payload
 *
 * Write a set of RPMH commands, the order of commands is maintained
 * and will be sent as a single shot.
 *
 * Return: 0 on success, a negative errno otherwise.
 *
 * NOTE(review): the body of this function was truncated in the source;
 * it has been restored to match the upstream implementation — verify
 * against the project tree.
 */
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 n)
{
	struct rpmh_request *rpm_msg;
	int ret;

	/* Allocate a request that the tx_done callback will free for us */
	rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
	if (!rpm_msg)
		return -ENOMEM;
	rpm_msg->needs_free = true;

	ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
	if (ret) {
		kfree(rpm_msg);
		return ret;
	}

	return __rpmh_write(dev, state, rpm_msg);
}
/**
 * rpmh_write: Write a set of RPMH commands and block until response
 *
 * @dev: The device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The number of elements in @cmd
 *
 * May sleep. Do not call from atomic contexts.
 *
 * Return: 0 on success, a negative errno on failure, -ETIMEDOUT if the
 * controller did not signal completion within RPMH_TIMEOUT_MS.
 */
int rpmh_write(const struct device *dev, enum rpmh_state state,
	       const struct tcs_cmd *cmd, u32 n)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
	int ret;

	ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
	if (ret)
		return ret;

	ret = __rpmh_write(dev, state, &rpm_msg);
	if (ret)
		return ret;

	/* Block until tx_done signals the on-stack completion (or timeout) */
	ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
	WARN_ON(!ret);
	return (ret > 0) ? 0 : -ETIMEDOUT;
}
/**
 * flush_batch: Write cached batch requests to the controller
 *
 * @ctrlr: The controller whose batch_cache should be flushed
 *
 * Send every cached Sleep/Wake batch request to the controller as
 * fire-and-forget control data; no completion/ack is expected.
 *
 * Return: 0 on success, the first error from
 * rpmh_rsc_write_ctrl_data() otherwise.
 */
static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
	struct batch_cache_req *req;
	const struct rpmh_request *rpm_msg;
	int ret = 0;
	int i;

	/* Send Sleep/Wake requests to the controller, expect no response */
	list_for_each_entry(req, &ctrlr->batch_cache, list) {
		for (i = 0; i < req->count; i++) {
			rpm_msg = req->rpm_msgs + i;
			ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
						       &rpm_msg->msg);
			if (ret)
				break;
		}
	}

	return ret;
}
/**
 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
 * batch to finish.
 *
 * @dev: the device making the request
 * @state: Active/sleep set
 * @cmd: The payload data
 * @n: The array of count of elements in each batch, 0 terminated.
 *
 * Write a request to the RSC controller without caching. If the request
 * state is ACTIVE, then the requests are treated as completion request
 * and sent to the controller immediately. The function waits until all the
 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
 * request is sent as fire-n-forget and no ack is expected.
 *
 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
 *
 * Return: 0 on success, a negative errno otherwise.
 *
 * NOTE(review): the middle of this function (allocation, message fill and
 * the send loop) and the exit path were missing from the source; they have
 * been restored to match the upstream implementation — verify against the
 * project tree.
 */
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n)
{
	struct batch_cache_req *req;
	struct rpmh_request *rpm_msgs;
	struct completion *compls;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	unsigned long time_left;
	int count = 0;
	int ret, i;
	void *ptr;

	if (!cmd || !n)
		return -EINVAL;

	/* @n is zero-terminated: count the batches */
	while (n[count] > 0)
		count++;
	if (!count)
		return -EINVAL;

	/* One allocation holds the cache req, the messages and completions */
	ptr = kzalloc(sizeof(*req) +
		      count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
		      GFP_ATOMIC);
	if (!ptr)
		return -ENOMEM;

	req = ptr;
	compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);

	req->count = count;
	rpm_msgs = req->rpm_msgs;

	for (i = 0; i < count; i++) {
		__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
		cmd += n[i];
	}

	if (state != RPMH_ACTIVE_ONLY_STATE) {
		/* Sleep/Wake batches are cached and flushed later */
		cache_batch(ctrlr, req);
		return 0;
	}

	for (i = 0; i < count; i++) {
		struct completion *compl = &compls[i];

		init_completion(compl);
		rpm_msgs[i].completion = compl;
		ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
		if (ret) {
			pr_err("Error(%d) sending RPMH message addr=%#x\n",
			       ret, rpm_msgs[i].msg.cmds[0].addr);
			break;
		}
	}

	time_left = RPMH_TIMEOUT_MS;
	/* Wait (in reverse) only for the messages that were actually sent */
	while (i--) {
		time_left = wait_for_completion_timeout(&compls[i], time_left);
		if (!time_left) {
			/*
			 * Better hope they never finish because they'll signal
			 * the completion that we're going to free once
			 * we've returned from this function.
			 */
			WARN_ON(1);
			ret = -ETIMEDOUT;
			goto exit;
		}
	}

exit:
	kfree(ptr);

	return ret;
}
/**
 * rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
 *
 * @ctrlr: Controller making request to flush cached data
 *
 * Return:
 * * 0          - Success
 * * Error code - Otherwise
 *
 * NOTE(review): the tail of this function (dirty clear, labels, unlock and
 * return) was missing from the source; it has been restored to match the
 * upstream implementation — verify against the project tree.
 */
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
	struct cache_req *p;
	int ret = 0;

	lockdep_assert_irqs_disabled();

	/*
	 * Currently rpmh_flush() is only called when we think we're running
	 * on the last processor. If the lock is busy it means another
	 * processor is up and it's better to abort than spin.
	 */
	if (!spin_trylock(&ctrlr->cache_lock))
		return -EBUSY;

	if (!ctrlr->dirty) {
		pr_debug("Skipping flush, TCS has latest data.\n");
		goto write_next_wakeup;
	}

	/* Invalidate the TCSes first to avoid stale data */
	rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));

	/* First flush the cached batch requests */
	ret = flush_batch(ctrlr);
	if (ret)
		goto exit;

	list_for_each_entry(p, &ctrlr->cache, list) {
		if (!is_req_valid(p)) {
			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
				 __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
				  p->sleep_val);
		if (ret)
			goto exit;
		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
				  p->wake_val);
		if (ret)
			goto exit;
	}

	ctrlr->dirty = false;

write_next_wakeup:
	rpmh_rsc_write_next_wakeup(ctrlr_to_drv(ctrlr));
exit:
	spin_unlock(&ctrlr->cache_lock);
	return ret;
}
/** * rpmh_invalidate: Invalidate sleep and wake sets in batch_cache * * @dev: The device making the request * * Invalidate the sleep and wake values in batch_cache.
*/ void rpmh_invalidate(conststruct device *dev)
{ struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev); struct batch_cache_req *req, *tmp; unsignedlong flags;
/*
 * NOTE(review): the following German website disclaimer was pasted into this
 * C file by the extraction tool and is not part of the driver — it should be
 * removed. English translation, preserved as a comment so the file compiles:
 * "The information on this website has been carefully compiled to the best of
 * our knowledge. However, neither completeness, correctness, nor quality of
 * the information provided is guaranteed. Note: the colored syntax display
 * and the measurement are still experimental."
 */