/*
 * Even though the SPC takes max 3-5 ms to complete any OPP/COMMS
 * operation, the operation could start just before jiffy is about
 * to be incremented. So setting timeout value of 20ms = 2jiffies@100Hz
 */
#define TIMEOUT_US 20000
/** * ve_spc_global_wakeup_irq() - sets/clears global wakeup IRQs * * @set: if true, global wake-up IRQs are set, if false they are cleared * * Function to set/clear global wakeup IRQs. Not protected by locking since * it might be used in code paths where normal cacheable locks are not * working. Locking must be provided by the caller to ensure atomicity.
*/ void ve_spc_global_wakeup_irq(bool set)
{
u32 reg;
/** * ve_spc_cpu_wakeup_irq() - sets/clears per-CPU wake-up IRQs * * @cluster: mpidr[15:8] bitfield describing cluster affinity level * @cpu: mpidr[7:0] bitfield describing cpu affinity level * @set: if true, wake-up IRQs are set, if false they are cleared * * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since * it might be used in code paths where normal cacheable locks are not * working. Locking must be provided by the caller to ensure atomicity.
*/ void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
{
u32 mask, reg;
/** * ve_spc_powerdown() - enables/disables cluster powerdown * * @cluster: mpidr[15:8] bitfield describing cluster affinity level * @enable: if true enables powerdown, if false disables it * * Function to enable/disable cluster powerdown. Not protected by locking * since it might be used in code paths where normal cacheable locks are not * working. Locking must be provided by the caller to ensure atomicity.
*/ void ve_spc_powerdown(u32 cluster, bool enable)
{
u32 pwdrn_reg;
/** * ve_spc_cpu_in_wfi() - Checks if the specified CPU is in WFI or not * * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster * @cluster: mpidr[15:8] bitfield describing cluster affinity level * * @return: non-zero if and only if the specified CPU is in WFI * * Take care when interpreting the result of this function: a CPU might * be in WFI temporarily due to idle, and is not necessarily safely * parked.
*/ int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster)
{ int ret;
u32 mask = standbywfi_cpu_mask(cpu, cluster);
if (cluster >= MAX_CLUSTERS) return 1;
ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT);
/**
 * ve_spc_round_performance() - find closest match to given frequency in OPP table
 * @cluster: cluster index whose OPP table is searched
 * @freq: requested frequency in Hz
 *
 * Scans the cluster's OPP table (entries stored in kHz) and returns, in Hz,
 * the smallest OPP frequency that is >= @freq; if no OPP is that high, the
 * largest OPP frequency below @freq is returned instead.
 */
static int ve_spc_round_performance(int cluster, u32 freq)
{
	int idx, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];
	u32 fmin = 0, fmax = ~0, ftmp;

	freq /= 1000; /* OPP entries in kHz */
	for (idx = 0; idx < max_opp; idx++, opps++) {
		ftmp = opps->freq;
		if (ftmp >= freq) {
			/* candidate at or above request: keep the lowest */
			if (ftmp <= fmax)
				fmax = ftmp;
		} else {
			/* fallback below request: keep the highest */
			if (ftmp >= fmin)
				fmin = ftmp;
		}
	}

	if (fmax != ~0)
		return fmax * 1000;
	else
		return fmin * 1000;
}
for (idx = 0; idx < max_opp; idx++, opps++) {
ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt); if (ret) {
dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
opps->freq, opps->u_volt); return ret;
}
} return ret;
}
/**
 * ve_spc_init() - allocate and publish the SPC driver descriptor
 * @baseaddr: ioremapped base address of the SPC register block
 * @a15_clusid: cluster id of the Cortex-A15 cluster
 * @irq: SPC interrupt line
 *
 * Allocates the global driver info structure, initialises its semaphore and
 * writes it back to main memory so non-coherent observers see valid data.
 *
 * Return: 0 on success, -ENOMEM if the descriptor allocation fails.
 *
 * NOTE(review): @baseaddr, @a15_clusid and @irq are not referenced in the
 * visible body — confirm against the full driver that register-base setup
 * and IRQ registration were not lost.
 */
int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
{
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	sema_init(&info->sem, 1);
	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure driver info reaches main
	 * memory.
	 */
	sync_cache_w(info);
	sync_cache_w(&info);

	return 0;
}
/*
 * Per-cluster SPC clock: pairs a common-clock-framework clk_hw with the
 * cluster it controls. Field order is preserved (clk_hw first) so lookups
 * from a struct clk_hw pointer keep working.
 */
struct clk_spc {
	struct clk_hw hw;
	int cluster;
};
if (!info) return 0; /* Continue only if SPC is initialised */
if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
pr_err("failed to build OPP table\n"); return -ENODEV;
}
for_each_possible_cpu(cpu) { struct device *cpu_dev = get_cpu_device(cpu); if (!cpu_dev) {
pr_warn("failed to get cpu%d device\n", cpu); continue;
}
clk = ve_spc_clk_register(cpu_dev); if (IS_ERR(clk)) {
pr_warn("failed to register cpu%d clock\n", cpu); continue;
} if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
pr_warn("failed to register cpu%d clock lookup\n", cpu); continue;
}
cluster = topology_physical_package_id(cpu_dev->id); if (cluster < 0 || init_opp_table[cluster]) continue;
if (ve_init_opp_table(cpu_dev))
pr_warn("failed to initialise cpu%d opp table\n", cpu); elseif (dev_pm_opp_set_sharing_cpus(cpu_dev,
topology_core_cpumask(cpu_dev->id)))
pr_warn("failed to mark OPPs shared for cpu%d\n", cpu); else
init_opp_table[cluster] = true;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.