/* * Expose only those Hardware idle states via the cpuidle framework * that have latency value below POWERNV_THRESHOLD_LATENCY_NS.
*/ #define POWERNV_THRESHOLD_LATENCY_NS 200000
/*
 * Fragment of the snooze idle-state handler (the function entry is not
 * visible in this chunk): busy-poll until the scheduler needs this CPU
 * or the snooze timeout elapses.  get_tb() reads the timebase register,
 * so the exit deadline is expressed in timebase ticks.
 */
snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
/* Assume a normal wakeup until the timeout path below proves otherwise. */
dev->poll_time_limit = false;
/* Turn the runlatch off: tells the PowerPC core this thread is idle. */
ppc64_runlatch_off();
/* Drop SMT priority so sibling hardware threads get issue bandwidth. */
HMT_very_low(); while (!need_resched()) { if (likely(snooze_timeout_en) && get_tb() > snooze_exit_time) { /* * Task has not woken up but we are exiting the polling * loop anyway. Require a barrier after polling is * cleared to order subsequent test of need_resched().
*/
clear_thread_flag(TIF_POLLING_NRFLAG);
/* Record that we left polling because of the timeout, not a wakeup. */
dev->poll_time_limit = true;
/* Full barrier pairs with the comment above before re-checking need_resched(). */
smp_mb(); break;
}
}
/*
 * Fragment of the cpuidle driver-init loop (enclosing function is not
 * visible in this chunk): iterate over the static state table and skip
 * slots whose ->enter callback was never wired up, i.e. disabled states.
 */
for (idle_state = 0; idle_state < max_idle_state; ++idle_state) { /* Is the state not enabled? */ if (cpuidle_state_table[idle_state].enter == NULL) continue;
/* * On the PowerNV platform cpu_present may be less than cpu_possible in * cases when firmware detects the CPU, but it is not available to the * OS. If CONFIG_HOTPLUG_CPU=n, then such CPUs are not hot-pluggable at * run time and hence cpu_devices are not created for those CPUs by the * generic topology_init(). * * drv->cpumask defaults to cpu_possible_mask in * __cpuidle_driver_init(). This breaks cpuidle on PowerNV where * cpu_devices are not created for CPUs in cpu_possible_mask that * cannot be hot-added later at run time. * * Trying cpuidle_register_device() on a CPU without a cpu_device is * incorrect, so pass a correct CPU mask to the generic cpuidle driver.
*/
/*
 * Register the platform idle states discovered from the device tree with
 * cpuidle.  Snooze is always present as state 0; device-tree states are
 * appended after it, subject to support-mask, latency and validity
 * filtering.  (The function body continues past the end of this chunk.)
 *
 * NOTE(review): "staticint" on the next line is an extraction artifact of
 * "static int" — restore the space when re-integrating this chunk.
 */
extern u32 pnv_get_supported_cpuidle_states(void); staticint powernv_add_idle_states(void)
{ int nr_idle_states = 1; /* Snooze */ int dt_idle_states;
u32 has_stop_states = 0; int i;
/* Bitmask of idle-state flags this firmware/kernel combination supports. */
u32 supported_flags = pnv_get_supported_cpuidle_states();
/* Currently we have snooze statically defined */ if (nr_pnv_idle_states <= 0) {
pr_warn("cpuidle-powernv : Only Snooze is available\n"); goto out;
}
/* TODO: Count only states which are eligible for cpuidle */
dt_idle_states = nr_pnv_idle_states;
/* * Since snooze is used as first idle state, max idle states allowed is * CPUIDLE_STATE_MAX -1
*/ if (nr_pnv_idle_states > CPUIDLE_STATE_MAX - 1) {
pr_warn("cpuidle-powernv: discovered idle states more than allowed");
/* Clamp: the table holds CPUIDLE_STATE_MAX entries including snooze. */
dt_idle_states = CPUIDLE_STATE_MAX - 1;
}
/* * If the idle states use stop instruction, probe for psscr values * and psscr mask which are necessary to specify required stop level.
*/
has_stop_states = (pnv_idle_states[0].flags &
(OPAL_PM_STOP_INST_FAST | OPAL_PM_STOP_INST_DEEP));
/* NOTE(review): "unsignedint" below is an extraction artifact of "unsigned int". */
for (i = 0; i < dt_idle_states; i++) { unsignedint exit_latency, target_residency; bool stops_timebase = false; struct pnv_idle_states_t *state = &pnv_idle_states[i];
/* * Skip the platform idle state whose flag isn't in * the supported_cpuidle_states flag mask.
*/ if ((state->flags & supported_flags) != state->flags) continue; /* * If an idle state has exit latency beyond * POWERNV_THRESHOLD_LATENCY_NS then don't use it * in cpu-idle.
*/ if (state->latency_ns > POWERNV_THRESHOLD_LATENCY_NS) continue; /* * Firmware passes residency and latency values in ns. * cpuidle expects it in us.
*/
exit_latency = DIV_ROUND_UP(state->latency_ns, 1000);
target_residency = DIV_ROUND_UP(state->residency_ns, 1000);
/* Stop-based (deep) states must have been validated during DT parsing. */
if (has_stop_states && !(state->valid)) continue;
/* Remember whether entering this state stops the timebase (needs resync on exit). */
if (state->flags & OPAL_PM_TIMEBASE_STOP)
stops_timebase = true;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.