// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 */
static inline void __iomem *
tcs_reg_addr(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return drv->tcs_base + drv->regs[RSC_DRV_TCS_OFFSET] * tcs_id + reg;
}
static inline void __iomem *
tcs_cmd_addr(const struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
{
	return tcs_reg_addr(drv, reg, tcs_id) + drv->regs[RSC_DRV_CMD_OFFSET] * cmd_id;
}
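
/*
 * Illustrative note (not part of the driver): the TCS register space is laid
 * out as fixed-size blocks, so, for example, tcs_cmd_addr(drv, reg, 2, 3)
 * resolves to drv->tcs_base + 2 * drv->regs[RSC_DRV_TCS_OFFSET] +
 * 3 * drv->regs[RSC_DRV_CMD_OFFSET] + reg, i.e. command slot 3 of TCS 2.
 */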
static u32 read_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			int cmd_id)
{
	return readl_relaxed(tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
static u32 read_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id)
{
	return readl_relaxed(tcs_reg_addr(drv, reg, tcs_id));
}
static void write_tcs_cmd(const struct rsc_drv *drv, int reg, int tcs_id,
			  int cmd_id, u32 data)
{
	writel_relaxed(data, tcs_cmd_addr(drv, reg, tcs_id, cmd_id));
}
static void write_tcs_reg(const struct rsc_drv *drv, int reg, int tcs_id,
			  u32 data)
{
writel_relaxed(data, tcs_reg_addr(drv, reg, tcs_id));
}
static void write_tcs_reg_sync(const struct rsc_drv *drv, int reg, int tcs_id,
			       u32 data)
{
	int i;

	writel(data, tcs_reg_addr(drv, reg, tcs_id));

	/*
	 * Wait until we read back the same value. Use a counter rather than
	 * ktime for the timeout since this may be called after timekeeping
	 * stops; the bound works out to roughly one second of polling.
	 */
	for (i = 0; i < USEC_PER_SEC; i++) {
		if (readl(tcs_reg_addr(drv, reg, tcs_id)) == data)
			return;
		udelay(1);
	}

	pr_err("%s: error writing %#x to %d:%#x\n", drv->name,
	       data, tcs_id, reg);
}
/**
 * tcs_invalidate() - Invalidate all TCSes of the given type (sleep or wake).
 * @drv:  The RSC controller.
 * @type: SLEEP_TCS or WAKE_TCS
 *
 * This will clear the "slots" variable of the given tcs_group and also
 * tell the hardware to forget about all entries.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
static void tcs_invalidate(struct rsc_drv *drv, int type)
{
	int m;
	struct tcs_group *tcs = &drv->tcs[type];

	/* Caller ensures nobody else is running so no lock */
	if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS))
		return;

for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++)
write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], m, 0);
bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
}
/**
 * rpmh_rsc_invalidate() - Invalidate sleep and wake TCSes.
 * @drv: The RSC controller.
 *
 * The caller must ensure that no other RPMH actions are happening when this
 * function is called, since otherwise the device may immediately become
 * used again even before this function exits.
 */
void rpmh_rsc_invalidate(struct rsc_drv *drv)
{
tcs_invalidate(drv, SLEEP_TCS);
tcs_invalidate(drv, WAKE_TCS);
}
/**
 * get_tcs_for_msg() - Get the tcs_group used to send the given message.
 * @drv: The RSC controller.
 * @msg: The message we want to send.
 *
 * This is normally pretty straightforward except if we are trying to send
 * an ACTIVE_ONLY message but don't have any active_only TCSes.
 *
 * Return: A pointer to a tcs_group or an ERR_PTR.
 */
static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
					 const struct tcs_request *msg)
{
	int type;
	struct tcs_group *tcs;

	switch (msg->state) {
	case RPMH_ACTIVE_ONLY_STATE:
		type = ACTIVE_TCS;
		break;
	case RPMH_WAKE_ONLY_STATE:
		type = WAKE_TCS;
		break;
	case RPMH_SLEEP_STATE:
		type = SLEEP_TCS;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/*
	 * If we are making an active request on an RSC that does not have a
	 * dedicated TCS for active state use, then re-purpose a wake TCS to
	 * send active votes. This is safe because we ensure any active-only
	 * transfers have finished before we use it (maybe by running from
	 * the last CPU in PM code).
	 */
	tcs = &drv->tcs[type];
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs)
		tcs = &drv->tcs[WAKE_TCS];
return tcs;
}
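
/*
 * Illustrative note (not part of the driver): on a DRV whose "qcom,tcs-config"
 * declares zero ACTIVE TCSes, an RPMH_ACTIVE_ONLY_STATE request therefore
 * comes back with the WAKE tcs_group, and rpmh_rsc_send_data() performs the
 * extra cleanup that borrowing a wake TCS implies.
 */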
/**
 * get_req_from_tcs() - Get a stashed request that was xfering on the given TCS.
 * @drv:    The RSC controller.
 * @tcs_id: The global ID of this TCS.
 *
 * For ACTIVE_ONLY transfers we want to call back into the client when the
 * transfer finishes. To do this we need the "request" that the client
 * originally provided us. This function grabs the request that we stashed
 * when we started the transfer.
 *
 * This only makes sense for ACTIVE_ONLY transfers since those are the only
 * ones we track sending (the only ones we enable interrupts for and the only
 * ones we call back to the client for).
 *
 * Return: The stashed request.
 */
static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
						  int tcs_id)
{
	struct tcs_group *tcs;
	int i;

	for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[i];
		if (tcs->mask & BIT(tcs_id))
			return tcs->req[tcs_id - tcs->offset];
}
return NULL;
}
/**
 * __tcs_set_trigger() - Start xfer on a TCS or unset trigger on a borrowed TCS
 * @drv:     The controller.
 * @tcs_id:  The global ID of this TCS.
 * @trigger: If true then untrigger/retrigger. If false then just untrigger.
 *
 * In the normal case we only ever call with "trigger=true" to start a
 * transfer. That will un-trigger/disable the TCS from the last transfer
 * then trigger/enable for this transfer.
 *
 * If we borrowed a wake TCS for an active-only transfer we'll also call
 * this function with "trigger=false" to just do the un-trigger/disable
 * before using the TCS for wake purposes again.
 *
 * Note that the AP is only in charge of triggering active-only transfers.
 * The AP never triggers sleep/wake values using this function.
 */
static void __tcs_set_trigger(struct rsc_drv *drv, int tcs_id, bool trigger)
{
u32 enable;
u32 reg = drv->regs[RSC_DRV_CONTROL];
	/*
	 * HW req: Clear the DRV_CONTROL and enable TCS again
	 * While clearing ensure that the AMC mode trigger is cleared
	 * and then the mode enable is cleared.
	 */
enable = read_tcs_reg(drv, reg, tcs_id);
enable &= ~TCS_AMC_MODE_TRIGGER;
write_tcs_reg_sync(drv, reg, tcs_id, enable);
enable &= ~TCS_AMC_MODE_ENABLE;
write_tcs_reg_sync(drv, reg, tcs_id, enable);
	if (trigger) {
		/* Enable the AMC mode on the TCS and then trigger the TCS */
enable = TCS_AMC_MODE_ENABLE;
write_tcs_reg_sync(drv, reg, tcs_id, enable);
enable |= TCS_AMC_MODE_TRIGGER;
write_tcs_reg(drv, reg, tcs_id, enable);
}
}
/**
 * enable_tcs_irq() - Enable or disable interrupts on the given TCS.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @enable: If true then enable; if false then disable.
 *
 * We only ever call this when we borrow a wake TCS for an active-only
 * transfer. For active-only TCSes interrupts are always left enabled.
 */
static void enable_tcs_irq(struct rsc_drv *drv, int tcs_id, bool enable)
{
u32 data;
u32 reg = drv->regs[RSC_DRV_IRQ_ENABLE];
	data = readl_relaxed(drv->tcs_base + reg);
	if (enable)
		data |= BIT(tcs_id);
	else
		data &= ~BIT(tcs_id);
writel_relaxed(data, drv->tcs_base + reg);
}
/**
 * tcs_tx_done() - TX Done interrupt handler.
 * @irq: The IRQ number (ignored).
 * @p:   Pointer to "struct rsc_drv".
 *
 * Called for ACTIVE_ONLY transfers (those are the only ones we enable the
 * IRQ for) when a transfer is done.
 *
 * Return: IRQ_HANDLED
 */
static irqreturn_t tcs_tx_done(int irq, void *p)
{
	struct rsc_drv *drv = p;
	int i;
	unsigned long irq_status;
	const struct tcs_request *req;

	irq_status = readl_relaxed(drv->tcs_base + drv->regs[RSC_DRV_IRQ_STATUS]);

	for_each_set_bit(i, &irq_status, BITS_PER_TYPE(u32)) {
		req = get_req_from_tcs(drv, i);
		if (WARN_ON(!req))
			goto skip;

		/*
		 * If a wake TCS was re-purposed for sending active votes,
		 * clear the AMC trigger & enable modes and disable the
		 * interrupt for this TCS.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			__tcs_set_trigger(drv, i, false);
skip:
		/* Reclaim the TCS */
		write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i, 0);
		writel_relaxed(BIT(i), drv->tcs_base + drv->regs[RSC_DRV_IRQ_CLEAR]);
		spin_lock(&drv->lock);
		clear_bit(i, drv->tcs_in_use);
		/*
		 * Disable interrupt for WAKE TCS to avoid being
		 * spammed with interrupts coming when the solver
		 * sends its wake votes.
		 */
		if (!drv->tcs[ACTIVE_TCS].num_tcs)
			enable_tcs_irq(drv, i, false);
		spin_unlock(&drv->lock);
		wake_up(&drv->tcs_wait);
		if (req)
			rpmh_tx_done(req);
}
return IRQ_HANDLED;
}
/**
 * __tcs_buffer_write() - Write to TCS hardware from a request; don't trigger.
 * @drv:    The controller.
 * @tcs_id: The global ID of this TCS.
 * @cmd_id: The index within the TCS to start writing.
 * @msg:    The message we want to send, which will contain several addr/data
 *          pairs to program (but few enough that they all fit in one TCS).
 *
 * This is used for all types of transfers (active, sleep, and wake).
 */
static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
			       const struct tcs_request *msg)
{
u32 msgid;
u32 cmd_msgid = CMD_MSGID_LEN | CMD_MSGID_WRITE;
	u32 cmd_enable = 0;
	struct tcs_cmd *cmd;
	int i, j;

/* Convert all commands to RR when the request has wait_for_compl set */
cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
cmd = &msg->cmds[i];
cmd_enable |= BIT(j);
		msgid = cmd_msgid;
		/*
		 * Additionally, if the cmd->wait is set, make the command
		 * response reqd even if the overall request was fire-n-forget.
		 */
msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;

		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_MSGID], tcs_id, j, msgid);
		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_DATA], tcs_id, j, cmd->data);
	}

	cmd_enable |= read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id);
	write_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, cmd_enable);
}

/**
 * check_for_req_inflight() - Look to see if conflicting cmds are in flight.
 * @drv: The controller.
 * @tcs: A pointer to the tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The message we want to send, which will contain several addr/data
 *       pairs to program (but few enough that they all fit in one TCS).
 *
 * This will walk through the TCSes in the group and check if any of them
 * appear to be sending to addresses referenced in the message. If it finds
 * one it'll return -EBUSY.
 *
 * Only for use for active-only transfers.
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: 0 if nothing in flight or -EBUSY if we should try again later.
 *         The caller must re-enable interrupts between tries since that's
 *         the only way tcs_in_use will ever be updated and the only way
 *         RSC_DRV_CMD_ENABLE will ever be cleared.
 */
static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
				  const struct tcs_request *msg)
{
	unsigned long curr_enabled;
	u32 addr;
	int j, k;
	int i = tcs->offset;

	for_each_set_bit_from(i, drv->tcs_in_use, tcs->offset + tcs->num_tcs) {
		curr_enabled = read_tcs_reg(drv, drv->regs[RSC_DRV_CMD_ENABLE], i);

		for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
			addr = read_tcs_cmd(drv, drv->regs[RSC_DRV_CMD_ADDR], i, j);
			for (k = 0; k < msg->num_cmds; k++) {
				if (cmd_db_match_resource_addr(msg->cmds[k].addr, addr))
					return -EBUSY;
}
}
}
return 0;
}
/**
 * find_free_tcs() - Find free tcs in the given tcs_group; only for active.
 * @tcs: A pointer to the active-only tcs_group (or the wake tcs_group if
 *       we borrowed it because there are zero active-only ones).
 *
 * Must be called with the drv->lock held since that protects tcs_in_use.
 *
 * Return: The first tcs that's free or -EBUSY if all in use.
 */
static int find_free_tcs(struct tcs_group *tcs)
{
	const struct rsc_drv *drv = tcs->drv;
	unsigned long i;
	unsigned long max = tcs->offset + tcs->num_tcs;

	i = find_next_zero_bit(drv->tcs_in_use, max, tcs->offset);
	if (i >= max)
		return -EBUSY;

return i;
}
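
/*
 * Illustrative note (not part of the driver): for a group with tcs->offset = 2
 * and tcs->num_tcs = 2, this scans bits 2..3 of drv->tcs_in_use and returns
 * the index of the first clear bit, or -EBUSY if both TCSes are claimed.
 */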
/**
 * claim_tcs_for_req() - Claim a tcs in the given tcs_group; only for active.
 * @drv: The controller.
 * @tcs: The tcs_group used for ACTIVE_ONLY transfers.
 * @msg: The data to be sent.
 *
 * Claims a tcs in the given tcs_group while making sure that no existing cmd
 * is in flight that would conflict with the one in @msg.
 *
 * Context: Must be called with the drv->lock held since that protects
 *          tcs_in_use.
 *
 * Return: The id of the claimed tcs or -EBUSY if a matching msg is in flight
 *         or the tcs_group is full.
 */
static int claim_tcs_for_req(struct rsc_drv *drv, struct tcs_group *tcs,
			     const struct tcs_request *msg)
{
	int ret;

	/*
	 * The h/w does not like if we send a request to the same address,
	 * when one is already in-flight or being processed.
	 */
	ret = check_for_req_inflight(drv, tcs, msg);
	if (ret)
		return ret;

return find_free_tcs(tcs);
}
/**
 * rpmh_rsc_send_data() - Write / trigger active-only message.
 * @drv: The controller.
 * @msg: The data to be sent.
 *
 * NOTES:
 * - This is only used for "ACTIVE_ONLY" since the limitations of this
 *   function don't make sense for sleep/wake cases.
 * - To do the transfer, we will grab a whole TCS for ourselves--we don't
 *   try to share. If there are none available we'll wait indefinitely
 *   for a free one.
 * - This function will not wait for the commands to be finished, only for
 *   data to be programmed into the RPMh. See rpmh_tx_done() which will
 *   be called when the transfer is fully complete.
 * - This function must be called with interrupts enabled. If the hardware
 *   is busy doing someone else's transfer we need that transfer to fully
 *   finish so that we can have the hardware, and to fully finish it needs
 *   the interrupt handler to run. If the interrupt is set to run on the
 *   active CPU this can never happen if interrupts are disabled.
 *
 * Return: 0 on success, -EINVAL on error.
 */
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id;

might_sleep();
	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

spin_lock_irq(&drv->lock);
/* Wait forever for a free tcs. It better be there eventually! */
wait_event_lock_irq(drv->tcs_wait,
(tcs_id = claim_tcs_for_req(drv, tcs, msg)) >= 0,
drv->lock);
tcs->req[tcs_id - tcs->offset] = msg;
	set_bit(tcs_id, drv->tcs_in_use);
	if (msg->state == RPMH_ACTIVE_ONLY_STATE && tcs->type != ACTIVE_TCS) {
		/*
		 * Clear previously programmed WAKE commands in the selected
		 * repurposed TCS to avoid triggering them. tcs->slots will be
		 * cleaned from rpmh_flush() by invoking rpmh_rsc_invalidate().
		 */
write_tcs_reg_sync(drv, drv->regs[RSC_DRV_CMD_ENABLE], tcs_id, 0);
enable_tcs_irq(drv, tcs_id, true);
}
spin_unlock_irq(&drv->lock);
	/*
	 * These two can be done after the lock is released because:
	 * - We marked "tcs_in_use" under lock.
	 * - Once "tcs_in_use" has been marked nobody else could be writing
	 *   to these registers until the interrupt goes off.
	 * - The interrupt can't go off until we trigger w/ the last line
	 *   of __tcs_set_trigger() below.
	 */
__tcs_buffer_write(drv, tcs_id, 0, msg);
__tcs_set_trigger(drv, tcs_id, true);
return 0;
}
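
/*
 * Illustrative sketch (not part of the driver): a caller such as the rpmh
 * core would typically build a request along these lines before handing it
 * to rpmh_rsc_send_data(). The struct fields follow include/soc/qcom/tcs.h;
 * the resource address below is a made-up example, real addresses come from
 * cmd-db.
 *
 *	struct tcs_cmd cmd = {
 *		.addr = 0x30000,	hypothetical cmd-db address
 *		.data = 0x1,
 *		.wait = true,
 *	};
 *	struct tcs_request req = {
 *		.state = RPMH_ACTIVE_ONLY_STATE,
 *		.wait_for_compl = true,
 *		.num_cmds = 1,
 *		.cmds = &cmd,
 *	};
 *
 *	ret = rpmh_rsc_send_data(drv, &req);
 */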
/**
 * find_slots() - Find a place to write the given message.
 * @tcs:    The tcs group to search.
 * @msg:    The message we want to find room for.
 * @tcs_id: If we return 0 from the function, we return the global ID of the
 *          TCS to write to here.
 * @cmd_id: If we return 0 from the function, we return the index of
 *          the command array of the returned TCS where the client should
 *          start writing the message.
 *
 * Only for use on sleep/wake TCSes since those are the only ones we maintain
 * tcs->slots for.
 *
 * Return: -ENOMEM if there was no room, else 0.
 */
static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
		      int *tcs_id, int *cmd_id)
{
	int slot, offset;
	int i = 0;

	/* Do over, until we can fit the full payload in a single TCS */
	do {
		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
						  i, msg->num_cmds, 0);
		if (slot >= tcs->num_tcs * tcs->ncpt)
			return -ENOMEM;
		i += tcs->ncpt;
	} while (slot + msg->num_cmds - 1 >= i);

	bitmap_set(tcs->slots, slot, msg->num_cmds);

	offset = slot / tcs->ncpt;
	*tcs_id = offset + tcs->offset;
	*cmd_id = slot % tcs->ncpt;

	return 0;
}
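
/*
 * Illustrative note (not part of the driver): with, say, ncpt = 16 commands
 * per TCS, a returned slot of 35 maps to *tcs_id = tcs->offset + 35 / 16 =
 * tcs->offset + 2 and *cmd_id = 35 % 16 = 3, i.e. the fourth command slot of
 * the third sleep/wake TCS in the group.
 */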
/**
 * rpmh_rsc_write_ctrl_data() - Write request to controller but don't trigger.
 * @drv: The controller.
 * @msg: The data to be written to the controller.
 *
 * This should only be called for sleep/wake state, never active-only
 * state.
 *
 * The caller must ensure that no other RPMH actions are happening and the
 * controller is idle when this function is called since it runs lockless.
 *
 * Return: 0 if no error; else -error.
 */
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
{
	struct tcs_group *tcs;
	int tcs_id = 0, cmd_id = 0;
	int ret;

	tcs = get_tcs_for_msg(drv, msg);
	if (IS_ERR(tcs))
		return PTR_ERR(tcs);

/* find the TCS id and the command in the TCS to write to */
	ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
	if (!ret)
__tcs_buffer_write(drv, tcs_id, cmd_id, msg);
return ret;
}
/**
 * rpmh_rsc_ctrlr_is_busy() - Check if any of the AMCs are busy.
 * @drv: The controller
 *
 * Checks if any of the AMCs are busy in handling ACTIVE sets.
 * This is called from the last cpu powering down before flushing
 * SLEEP and WAKE sets. If AMCs are busy, the controller can not enter
 * power collapse, so deny from the last cpu's pm notification.
 *
 * Context: Must be called with the drv->lock held.
 *
 * Return:
 * * False - AMCs are idle
 * * True  - AMCs are busy
 */
static bool rpmh_rsc_ctrlr_is_busy(struct rsc_drv *drv)
{
	unsigned long set;
	const struct tcs_group *tcs = &drv->tcs[ACTIVE_TCS];
	unsigned long max;

	/*
	 * If we made an active request on an RSC that does not have a
	 * dedicated TCS for active state use, then the re-purposed wake TCSes
	 * should be checked for not being busy, because we used wake TCSes
	 * for active requests in this case.
	 */
	if (!tcs->num_tcs)
tcs = &drv->tcs[WAKE_TCS];
max = tcs->offset + tcs->num_tcs;
set = find_next_bit(drv->tcs_in_use, max, tcs->offset);
return set < max;
}
/**
 * rpmh_rsc_write_next_wakeup() - Write next wakeup in CONTROL_TCS.
 * @drv: The controller
 *
 * Writes maximum wakeup cycles when called from suspend.
 * Writes earliest hrtimer wakeup when called from idle.
 */
void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv)
{
ktime_t now, wakeup;
u64 wakeup_us, wakeup_cycles = ~0;
u32 lo, hi;
	if (!drv->tcs[CONTROL_TCS].num_tcs || !drv->genpd_nb.notifier_call)
		return;

	/* Set highest time when system (timekeeping) is suspended */
	if (system_state == SYSTEM_SUSPEND)
		goto exit;

/* Find the earliest hrtimer wakeup from online cpus */
wakeup = dev_pm_genpd_get_next_hrtimer(drv->dev);
/* Find the relative wakeup in kernel time scale */
now = ktime_get();
wakeup = ktime_sub(wakeup, now);
wakeup_us = ktime_to_us(wakeup);
/* Convert the wakeup to arch timer scale */
wakeup_cycles = USECS_TO_CYCLES(wakeup_us);
wakeup_cycles += arch_timer_read_counter();
exit:
lo = wakeup_cycles & RSC_DRV_CTL_TCS_DATA_LO_MASK;
hi = wakeup_cycles >> RSC_DRV_CTL_TCS_DATA_SIZE;
hi &= RSC_DRV_CTL_TCS_DATA_HI_MASK;
	hi |= RSC_DRV_CTL_TCS_DATA_HI_VALID;

	writel_relaxed(lo, drv->base + drv->regs[DRV_CTL_TCS_DATA_LO]);
	writel_relaxed(hi, drv->base + drv->regs[DRV_CTL_TCS_DATA_HI]);
}
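
/*
 * Illustrative note (not part of the driver): USECS_TO_CYCLES() converts the
 * relative wakeup from microseconds to arch timer ticks. On typical Qualcomm
 * SoCs the always-on arch timer runs at 19.2 MHz, so a wakeup 1000 us away
 * adds roughly 19200 ticks to the current counter value.
 */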
/**
 * rpmh_rsc_cpu_pm_callback() - Check if any of the AMCs are busy.
 * @nfb:    Pointer to the notifier block in struct rsc_drv.
 * @action: CPU_PM_ENTER, CPU_PM_ENTER_FAILED, or CPU_PM_EXIT.
 * @v:      Unused
 *
 * This function is given to cpu_pm_register_notifier so we can be informed
 * about when CPUs go down. When all CPUs go down we know no more active
 * transfers will be started so we write sleep/wake sets. This function gets
 * called from cpuidle code paths and also at system suspend time.
 *
 * If it's the last CPU going down and the AMCs are not busy then it writes
 * the cached sleep and wake messages to the TCSes. The firmware then takes
 * care of triggering them when entering deepest low power modes.
 *
 * Return: See cpu_pm_register_notifier()
 */
static int rpmh_rsc_cpu_pm_callback(struct notifier_block *nfb,
				    unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, rsc_pm);
	int ret = NOTIFY_OK;
	int cpus_in_pm;

	switch (action) {
	case CPU_PM_ENTER:
		cpus_in_pm = atomic_inc_return(&drv->cpus_in_pm);
		/*
		 * NOTE: comments for num_online_cpus() point out that it's
		 * only a snapshot so we need to be careful. It should be OK
		 * for us to use, though. It's important for us not to miss
		 * if we're the last CPU going down so it would only be a
		 * problem if a CPU went offline right after we did the check
		 * AND that CPU was not idle AND that CPU was the last non-idle
		 * CPU. That can't happen. CPUs would have to come out of idle
		 * before the CPU could go offline.
		 */
		if (cpus_in_pm < num_online_cpus())
			return NOTIFY_OK;
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		atomic_dec(&drv->cpus_in_pm);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
}
	/*
	 * It's likely we're on the last CPU. Grab the drv->lock and write
	 * out the sleep/wake commands to RPMH hardware. Grabbing the lock
	 * means that if we race with another CPU coming up we are still
	 * guaranteed to be safe. If another CPU came up just after we checked
	 * and has grabbed the lock or started an active transfer then we'll
	 * notice we're busy and abort. If another CPU comes up after we start
	 * flushing it will be blocked from starting an active transfer until
	 * we're done flushing. If another CPU starts an active transfer after
	 * we release the lock we're still OK because we're no longer the last
	 * CPU.
	 */
	if (spin_trylock(&drv->lock)) {
		if (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client))
ret = NOTIFY_BAD;
spin_unlock(&drv->lock);
	} else {
		/* Another CPU must be up */
		return NOTIFY_OK;
}
	if (ret == NOTIFY_BAD) {
		/* Double-check if we're here because someone else is up */
		if (cpus_in_pm < num_online_cpus())
			ret = NOTIFY_OK;
		else
			/* We won't be called w/ CPU_PM_ENTER_FAILED */
atomic_dec(&drv->cpus_in_pm);
}
return ret;
}
/**
 * rpmh_rsc_pd_callback() - Check if any of the AMCs are busy.
 * @nfb:    Pointer to the genpd notifier block in struct rsc_drv.
 * @action: GENPD_NOTIFY_PRE_OFF, GENPD_NOTIFY_OFF, GENPD_NOTIFY_PRE_ON or
 *          GENPD_NOTIFY_ON.
 * @v:      Unused
 *
 * This function is given to dev_pm_genpd_add_notifier() so we can be informed
 * about when the cluster power domain is going down. When the cluster goes
 * down we know no more active transfers will be started, so we write the
 * sleep/wake sets. This function gets called from cpuidle code paths and also
 * at system suspend time.
 *
 * If the AMCs are not busy then it writes the cached sleep and wake messages
 * to the TCSes. The firmware then takes care of triggering them when entering
 * the deepest low power modes.
 *
 * Return:
 * * NOTIFY_OK  - success
 * * NOTIFY_BAD - failure
 */
static int rpmh_rsc_pd_callback(struct notifier_block *nfb,
				unsigned long action, void *v)
{
	struct rsc_drv *drv = container_of(nfb, struct rsc_drv, genpd_nb);

	/* We don't need to lock as genpd on/off are serialized */
	if ((action == GENPD_NOTIFY_PRE_OFF) &&
	    (rpmh_rsc_ctrlr_is_busy(drv) || rpmh_flush(&drv->client)))
		return NOTIFY_BAD;

return NOTIFY_OK;
}
static int rpmh_rsc_pd_attach(struct rsc_drv *drv, struct device *dev)
{
	int ret;

pm_runtime_enable(dev);
drv->genpd_nb.notifier_call = rpmh_rsc_pd_callback;
	ret = dev_pm_genpd_add_notifier(dev, &drv->genpd_nb);
	if (ret)
pm_runtime_disable(dev);

	return ret;
}

static int rpmh_probe_tcs_config(struct platform_device *pdev, struct rsc_drv *drv)
{
	struct tcs_type_config {
		u32 type;
		u32 n;
	} tcs_cfg[TCS_TYPE_NR] = { { 0 } };
	struct device_node *dn = pdev->dev.of_node;
	u32 config, max_tcs, ncpt, offset;
	int i, ret, n, st = 0;
	struct tcs_group *tcs;

	ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
	if (ret)
		return ret;
	drv->tcs_base = drv->base + offset;

	config = readl_relaxed(drv->base + drv->regs[DRV_PRNT_CHLD_CONFIG]);

	max_tcs = config;
	max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);

	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
	ncpt = ncpt >> DRV_NCPT_SHIFT;

	n = of_property_count_u32_elems(dn, "qcom,tcs-config");
	if (n != 2 * TCS_TYPE_NR)
		return -EINVAL;

for (i = 0; i < TCS_TYPE_NR; i++) {
ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2, &tcs_cfg[i].type);
		if (ret)
			return ret;
		if (tcs_cfg[i].type >= TCS_TYPE_NR)
			return -EINVAL;

ret = of_property_read_u32_index(dn, "qcom,tcs-config",
						 i * 2 + 1, &tcs_cfg[i].n);
		if (ret)
			return ret;
		if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
			return -EINVAL;
}
for (i = 0; i < TCS_TYPE_NR; i++) {
		tcs = &drv->tcs[tcs_cfg[i].type];
		if (tcs->drv)
			return -EINVAL;
tcs->drv = drv;
tcs->type = tcs_cfg[i].type;
tcs->num_tcs = tcs_cfg[i].n;
tcs->ncpt = ncpt;

		if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
			continue;

if (st + tcs->num_tcs > max_tcs ||
		    st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
			return -EINVAL;

		tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
		tcs->offset = st;
		st += tcs->num_tcs;
	}

	drv->num_tcs = st;

	return 0;
}

static int rpmh_rsc_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct rsc_drv *drv;
	int ret, irq;
	u32 solver_config;

	/*
	 * Even though RPMh doesn't directly use cmd-db, all of its children
	 * do. To avoid adding this check to our children we'll do it now.
	 */
	ret = cmd_db_ready();
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "Command DB not available\n");

	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
	if (!drv)
		return -ENOMEM;

	ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
	if (ret)
		return ret;

	drv->name = of_get_property(dn, "label", NULL);
	if (!drv->name)
drv->name = dev_name(&pdev->dev);

	irq = platform_get_irq(pdev, drv->id);
	if (irq < 0)
		return irq;

ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
			       drv->name, drv);
	if (ret)
		return ret;

	/*
	 * CPU PM/genpd notification are not required for controllers that
	 * support 'HW solver' mode where they can be in autonomous mode
	 * executing low power mode to power down.
	 */
solver_config = readl_relaxed(drv->base + drv->regs[DRV_SOLVER_CONFIG]);
solver_config &= DRV_HW_SOLVER_MASK << DRV_HW_SOLVER_SHIFT;
	solver_config = solver_config >> DRV_HW_SOLVER_SHIFT;
	if (!solver_config) {
		if (pdev->dev.pm_domain) {
			ret = rpmh_rsc_pd_attach(drv, &pdev->dev);
			if (ret)
				return ret;
} else {
drv->rsc_pm.notifier_call = rpmh_rsc_cpu_pm_callback;
cpu_pm_register_notifier(&drv->rsc_pm);
}
}
/* Enable the active TCS to send requests immediately */
writel_relaxed(drv->tcs[ACTIVE_TCS].mask,
drv->tcs_base + drv->regs[RSC_DRV_IRQ_ENABLE]);