/**
 * struct cpuhp_cpu_state - Per cpu hotplug state storage
 * @state:	The current cpu state
 * @target:	The target state
 * @fail:	Current CPU hotplug callback state
 * @thread:	Pointer to the hotplug thread
 * @should_run:	Thread should execute
 * @rollback:	Perform a rollback
 * @single:	Single callback invocation
 * @bringup:	Single callback bringup or teardown selector
 * @node:	Remote CPU node; for multi-instance, do a
 *		single entry callback for install/remove
 * @last:	For multi-instance rollback, remember how far we got
 * @cb_state:	The state for a single callback (install/uninstall)
 * @result:	Result of the operation
 * @ap_sync_state:	State for AP synchronization
 * @done_up:	Signal completion to the issuer of the task for cpu-up
 * @done_down:	Signal completion to the issuer of the task for cpu-down
 */
struct cpuhp_cpu_state {
	enum cpuhp_state	state;
	enum cpuhp_state	target;
	enum cpuhp_state	fail;
#ifdef CONFIG_SMP
	struct task_struct	*thread;
	bool			should_run;
	bool			rollback;
	bool			single;
	bool			bringup;
	struct hlist_node	*node;
	struct hlist_node	*last;
	enum cpuhp_state	cb_state;
	int			result;
	atomic_t		ap_sync_state;
	struct completion	done_up;
	struct completion	done_down;
#endif
};
/**
 * struct cpuhp_step - Hotplug state machine step
 * @name:	Name of the step
 * @startup:	Startup function of the step
 * @teardown:	Teardown function of the step
 * @cant_stop:	Bringup/teardown can't be stopped at this step
 * @multi_instance:	State has multiple instances which get added afterwards
 */
struct cpuhp_step {
	const char		*name;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} startup;
	union {
		int		(*single)(unsigned int cpu);
		int		(*multi)(unsigned int cpu,
					 struct hlist_node *node);
	} teardown;
	/* private: */
	struct hlist_head	list;
	/* public: */
	bool			cant_stop;
	bool			multi_instance;
};
/** * cpuhp_invoke_callback - Invoke the callbacks for a given state * @cpu: The cpu for which the callback should be invoked * @state: The state to do callbacks for * @bringup: True if the bringup callback should be invoked * @node: For multi-instance, do a single entry callback for install/remove * @lastp: For multi-instance rollback, remember how far we got * * Called from cpu hotplug and from the state register machinery. * * Return: %0 on success or a negative errno code
*/ staticint cpuhp_invoke_callback(unsignedint cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node, struct hlist_node **lastp)
{ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); struct cpuhp_step *step = cpuhp_get_step(state); int (*cbm)(unsignedint cpu, struct hlist_node *node); int (*cb)(unsignedint cpu); int ret, cnt;
/*
 * NOTE(review): 'cbm' and 'cb' are used below without any visible
 * assignment in this view. The st->fail short-circuit, the
 * single-instance (!step->multi_instance) branch and the selection of
 * startup.multi/teardown.multi appear to have been dropped from this
 * chunk — confirm against the full file before modifying this function.
 */
/* Single invocation for instance add/remove */ if (node) {
WARN_ON_ONCE(lastp && *lastp);
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret); return ret;
}
/* State transition. Invoke on all instances */
cnt = 0;
hlist_for_each(node, &step->list) { if (lastp && node == *lastp) break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret); if (ret) { if (!lastp) goto err;
/* Remember the failing instance so the caller can roll back to it. */
*lastp = node; return ret;
}
cnt++;
} if (lastp)
*lastp = NULL; return 0;
err: /* Rollback the instances if one failed */
cbm = !bringup ? step->startup.multi : step->teardown.multi; if (!cbm) return ret;
hlist_for_each(node, &step->list) { if (!cnt--) break;
trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
ret = cbm(cpu, node);
trace_cpuhp_exit(cpu, st->state, state, ret); /* * Rollback must not fail,
*/
WARN_ON_ONCE(ret);
} return ret;
}
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
	if (state <= CPUHP_BRINGUP_CPU)
		return false;

	/*
	 * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
	 * purposes as that state is handled explicitly in cpu_down.
	 */
	return state != CPUHP_TEARDOWN_CPU;
}
/* * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
*/ staticbool cpuhp_is_atomic_state(enum cpuhp_state state)
{ return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
}
#ifdef CONFIG_HOTPLUG_CORE_SYNC /** * cpuhp_ap_update_sync_state - Update synchronization state during bringup/teardown * @state: The synchronization state to set * * No synchronization point. Just update of the synchronization state, but implies * a full barrier so that the AP changes are visible before the control CPU proceeds.
*/ staticinlinevoid cpuhp_ap_update_sync_state(enum cpuhp_sync_state state)
{
atomic_t *st = this_cpu_ptr(&cpuhp_state.ap_sync_state);
/*
 * NOTE(review): this function is declared void yet the body below
 * returns true/false and references 'sync', 'next_state', 'now', 'end'
 * and 'start', none of which are declared here. This looks like the
 * body of cpuhp_wait_for_sync_state() (timeout-bounded poll loop)
 * fused into this view by dropped lines — verify against the full
 * file; do not edit the loop until the real boundaries are known.
 */
sync = atomic_read(st); while (1) { if (sync == state) { if (!atomic_try_cmpxchg(st, &sync, next_state)) continue; returntrue;
}
now = ktime_get(); if (now > end) { /* Timeout. Leave the state unchanged */ returnfalse;
} elseif (now - start < NSEC_PER_MSEC) { /* Poll for one millisecond */
arch_cpuhp_sync_state_poll();
} else {
usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
}
sync = atomic_read(st);
} returntrue;
} #else/* CONFIG_HOTPLUG_CORE_SYNC */ staticinlinevoid cpuhp_ap_update_sync_state(enum cpuhp_sync_state state) { } #endif/* !CONFIG_HOTPLUG_CORE_SYNC */
#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
/**
 * cpuhp_ap_report_dead - Update synchronization state to DEAD
 *
 * No synchronization point. Just update of the synchronization state.
 */
void cpuhp_ap_report_dead(void)
{
	cpuhp_ap_update_sync_state(SYNC_STATE_DEAD);
}
/* * Late CPU shutdown synchronization point. Cannot use cpuhp_state::done_down * because the AP cannot issue complete() at this stage.
*/ staticvoid cpuhp_bp_sync_dead(unsignedint cpu)
{
atomic_t *st = per_cpu_ptr(&cpuhp_state.ap_sync_state, cpu); int sync = atomic_read(st);
do { /* CPU can have reported dead already. Don't overwrite that! */ if (sync == SYNC_STATE_DEAD) break;
} while (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_SHOULD_DIE));
if (cpuhp_wait_for_sync_state(cpu, SYNC_STATE_DEAD, SYNC_STATE_DEAD)) { /* CPU reached dead state. Invoke the cleanup function */
arch_cpuhp_cleanup_dead_cpu(cpu); return;
}
/* No further action possible. Emit message and give up. */
pr_err("CPU%u failed to report dead state\n", cpu);
} #else/* CONFIG_HOTPLUG_CORE_SYNC_DEAD */ staticinlinevoid cpuhp_bp_sync_dead(unsignedint cpu) { } #endif/* !CONFIG_HOTPLUG_CORE_SYNC_DEAD */
#ifdef CONFIG_HOTPLUG_CORE_SYNC_FULL
/**
 * cpuhp_ap_sync_alive - Synchronize AP with the control CPU once it is alive
 *
 * Updates the AP synchronization state to SYNC_STATE_ALIVE and waits
 * for the BP to release it.
 */
void cpuhp_ap_sync_alive(void)
{
	atomic_t *sync = this_cpu_ptr(&cpuhp_state.ap_sync_state);

	cpuhp_ap_update_sync_state(SYNC_STATE_ALIVE);

	/* Wait for the control CPU to release it. */
	while (atomic_read(sync) != SYNC_STATE_SHOULD_ONLINE)
		cpu_relax();
}
again: switch (sync) { case SYNC_STATE_DEAD: /* CPU is properly dead */ break; case SYNC_STATE_KICKED: /* CPU did not come up in previous attempt */ break; case SYNC_STATE_ALIVE: /* CPU is stuck cpuhp_ap_sync_alive(). */ break; default: /* CPU failed to report online or dead and is in limbo state. */ returnfalse;
}
/* Prepare for booting */ if (!atomic_try_cmpxchg(st, &sync, SYNC_STATE_KICKED)) goto again;
/* * Early CPU bringup synchronization point. Cannot use cpuhp_state::done_up * because the AP cannot issue complete() so early in the bringup.
*/ staticint cpuhp_bp_sync_alive(unsignedint cpu)
{ int ret = 0;
if (!IS_ENABLED(CONFIG_HOTPLUG_CORE_SYNC_FULL)) return 0;
if (!cpuhp_wait_for_sync_state(cpu, SYNC_STATE_ALIVE, SYNC_STATE_SHOULD_ONLINE)) {
pr_err("CPU%u failed to report alive state\n", cpu);
ret = -EIO;
}
/* Serializes the updates to cpu_online_mask, cpu_present_mask */ static DEFINE_MUTEX(cpu_add_remove_lock); bool cpuhp_tasks_frozen;
EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
/*
 * The following two APIs (cpu_maps_update_begin/done) must be used when
 * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}
/* * If set, cpu_up and cpu_down will return -EBUSY and do nothing. * Should always be manipulated under cpu_add_remove_lock
*/ staticint cpu_hotplug_disabled;
void lockdep_assert_cpus_held(void)
{ /* * We can't have hotplug operations before userspace starts running, * and some init codepaths will knowingly not take the hotplug lock. * This is all valid, so mute lockdep until it makes sense to report * unheld locks.
*/ if (system_state < SYSTEM_RUNNING) return;
/* Declare CPU offlining not supported */
void cpu_hotplug_disable_offlining(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_offline_disabled = true;
	cpu_maps_update_done();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any) and
 * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
 * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
 * hotplug path before performing hotplug operations. So acquiring that lock
 * guarantees mutual exclusion from any currently running hotplug operations.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
static void __cpu_hotplug_enable(void)
{
	/* Only decrement when a matching cpu_hotplug_disable() preceded us. */
	if (!WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
		cpu_hotplug_disabled--;
}
/*
 * The decision whether SMT is supported can only be done after the full
 * CPU identification. Called from architecture code.
 */
void __init cpu_smt_set_num_threads(unsigned int num_threads,
				    unsigned int max_threads)
{
	WARN_ON(!num_threads || (num_threads > max_threads));

	if (max_threads == 1)
		cpu_smt_control = CPU_SMT_NOT_SUPPORTED;

	cpu_smt_max_threads = max_threads;

	/*
	 * If SMT has been disabled via the kernel command line or SMT is
	 * not supported, set cpu_smt_num_threads to 1 for consistency.
	 * If enabled, take the architecture requested number of threads
	 * to bring up into account.
	 */
	if (cpu_smt_control != CPU_SMT_ENABLED)
		cpu_smt_num_threads = 1;
	else if (num_threads < cpu_smt_num_threads)
		cpu_smt_num_threads = num_threads;
}
/* * For Archicture supporting partial SMT states check if the thread is allowed. * Otherwise this has already been checked through cpu_smt_max_threads when * setting the SMT level.
*/ staticinlinebool cpu_smt_thread_allowed(unsignedint cpu)
{ #ifdef CONFIG_SMT_NUM_THREADS_DYNAMIC return topology_smt_thread_allowed(cpu); #else returntrue; #endif
}
/* All CPUs are bootable if controls are not configured */ if (cpu_smt_control == CPU_SMT_NOT_IMPLEMENTED) returntrue;
/* All CPUs are bootable if CPU is not SMT capable */ if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED) returntrue;
if (topology_is_primary_thread(cpu)) returntrue;
/* * On x86 it's required to boot all logical CPUs at least once so * that the init code can get a chance to set CR4.MCE on each * CPU. Otherwise, a broadcasted MCE observing CR4.MCE=0b on any * core will shutdown the machine.
*/ return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
}
/* Returns true if SMT is supported and not forcefully (irreversibly) disabled */ bool cpu_smt_possible(void)
{ return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
}
EXPORT_SYMBOL_GPL(cpu_smt_possible);
/* * Already rolling back. No need invert the bringup value or to change * the current state.
*/ if (st->rollback) return;
st->rollback = true;
/* * If we have st->last we need to undo partial multi_instance of this * state first. Otherwise start undo at the previous state.
*/ if (!st->last) { if (st->bringup)
st->state--; else
st->state++;
}
st->bringup = bringup; if (cpu_dying(cpu) != !bringup)
set_cpu_dying(cpu, !bringup);
}
/* Regular hotplug invocation of the AP hotplug thread */
static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
{
	/* Nothing to do when not a single call and the state is reached. */
	if (!st->single && st->state == st->target)
		return;

	st->result = 0;
	/*
	 * Make sure the above stores are visible before should_run becomes
	 * true. Paired with the mb() above in cpuhp_thread_fun()
	 */
	smp_mb();
	st->should_run = true;
	wake_up_process(st->thread);
	wait_for_ap_thread(st, st->bringup);
}
/* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
wait_for_ap_thread(st, true); if (WARN_ON_ONCE((!cpu_online(cpu)))) return -ECANCELED;
/* Unpark the hotplug thread of the target cpu */
kthread_unpark(st->thread);
/* * SMT soft disabling on X86 requires to bring the CPU out of the * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit. The * CPU marked itself as booted_once in notify_cpu_starting() so the * cpu_bootable() check will now return false if this is not the * primary sibling.
*/ if (!cpu_bootable(cpu)) return -ECANCELED; return 0;
}
#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP staticint cpuhp_kick_ap_alive(unsignedint cpu)
{ if (!cpuhp_can_boot_ap(cpu)) return -EAGAIN;
/* * Some architectures have to walk the irq descriptors to * setup the vector space for the cpu which comes online. * Prevent irq alloc/free across the bringup.
*/
irq_lock_sparse();
ret = cpuhp_bp_sync_alive(cpu); if (ret) goto out_unlock;
ret = bringup_wait_for_ap_online(cpu); if (ret) goto out_unlock;
/* * Some architectures have to walk the irq descriptors to * setup the vector space for the cpu which comes online. * * Prevent irq alloc/free across the bringup by acquiring the * sparse irq lock. Hold it until the upcoming CPU completes the * startup in cpuhp_online_idle() which allows to avoid * intermediate synchronization points in the architecture code.
*/
irq_lock_sparse();
ret = __cpu_up(cpu, idle); if (ret) goto out_unlock;
ret = cpuhp_bp_sync_alive(cpu); if (ret) goto out_unlock;
ret = bringup_wait_for_ap_online(cpu); if (ret) goto out_unlock;
/* * sched_force_init_mm() ensured the use of &init_mm, * drop that refcount now that the CPU has stopped.
*/
WARN_ON(mm != &init_mm);
idle->active_mm = NULL;
mmdrop_lazy_tlb(mm);
return 0;
}
/* * Hotplug state machine related functions
*/
/* * Get the next state to run. Empty ones will be skipped. Returns true if a * state must be run. * * st->state will be modified ahead of time, to match state_to_run, as if it * has already ran.
*/ staticbool cpuhp_next_state(bool bringup, enum cpuhp_state *state_to_run, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{ do { if (bringup) { if (st->state >= target) returnfalse;
staticinlinebool can_rollback_cpu(struct cpuhp_cpu_state *st)
{ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) returntrue; /* * When CPU hotplug is disabled, then taking the CPU down is not * possible because takedown_cpu() and the architecture and * subsystem specific mechanisms are not available. So the CPU * which would be completely unplugged again needs to stay around * in the current state.
*/ return st->state <= CPUHP_BRINGUP_CPU;
}
staticint cpuhp_up_callbacks(unsignedint cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{ enum cpuhp_state prev_state = st->state; int ret = 0;
ret = cpuhp_invoke_callback_range(true, cpu, st, target); if (ret) {
pr_debug("CPU UP failed (%d) CPU %u state %s (%d)\n",
ret, cpu, cpuhp_get_step(st->state)->name,
st->state);
/* * The cpu hotplug threads manage the bringup and teardown of the cpus
*/ staticint cpuhp_should_run(unsignedint cpu)
{ struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
return st->should_run;
}
/* * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke * callbacks when a state gets [un]installed at runtime. * * Each invocation of this function by the smpboot thread does a single AP * state callback. * * It has 3 modes of operation: * - single: runs st->cb_state * - up: runs ++st->state, while st->state < st->target * - down: runs st->state--, while st->state > st->target * * When complete or on error, should_run is cleared and the completion is fired.
*/ staticvoid cpuhp_thread_fun(unsignedint cpu)
{ struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state); bool bringup = st->bringup; enum cpuhp_state state;
if (WARN_ON_ONCE(!st->should_run)) return;
/* * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures * that if we see ->should_run we also see the rest of the state.
*/
smp_mb();
/* * The BP holds the hotplug lock, but we're now running on the AP, * ensure that anybody asserting the lock is held, will actually find * it so.
*/
lockdep_acquire_cpus_lock();
cpuhp_lock_acquire(bringup);
if (st->single) {
state = st->cb_state;
st->should_run = false;
} else {
st->should_run = cpuhp_next_state(bringup, &state, st, st->target); if (!st->should_run) goto end;
}
/*
 * NOTE(review): dropped lines here. The branch that actually invokes
 * the callback for atomic (STARTING/DYING) states — with IRQs disabled
 * — and the matching 'if' for the orphaned '} else {' below are not
 * visible in this view, nor is the 'end:' label targeted by the goto
 * above. Consult the full file before changing this function.
 */
/* * STARTING/DYING must not fail!
*/
WARN_ON_ONCE(st->result);
} else {
st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
}
if (st->result) { /* * If we fail on a rollback, we're up a creek without no * paddle, no way forward, no way back. We loose, thanks for * playing.
*/
WARN_ON_ONCE(st->rollback);
st->should_run = false;
}
if (!st->should_run)
complete_ap_thread(st, bringup);
}
/* Invoke a single callback on a remote cpu */ staticint
cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, struct hlist_node *node)
{ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int ret;
/* * If we are up and running, use the hotplug thread. For early calls * we invoke the thread function directly.
*/ if (!st->thread) return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
/**
 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
 * @cpu: a CPU id
 *
 * This function walks all processes, finds a valid mm struct for each one and
 * then clears a corresponding bit in mm's cpumask. While this all sounds
 * trivial, there are various non-obvious corner cases, which this function
 * tries to solve in a safe manner.
 *
 * Also note that the function uses a somewhat relaxed locking scheme, so it may
 * be called only for an already offlined CPU.
 */
void clear_tasks_mm_cpumask(int cpu)
{
	struct task_struct *p;

	/*
	 * This function is called after the cpu is taken down and marked
	 * offline, so its not like new tasks will ever get this cpu set in
	 * their mm mask. -- Peter Zijlstra
	 * Thus, we may use rcu_read_lock() here, instead of grabbing
	 * full-fledged tasklist_lock.
	 */
	WARN_ON(cpu_online(cpu));

	rcu_read_lock();
	for_each_process(p) {
		struct task_struct *holder;

		/*
		 * Main thread might exit, but other threads may still have
		 * a valid mm. Find one.
		 */
		holder = find_lock_task_mm(p);
		if (!holder)
			continue;
		arch_clear_mm_cpumask_cpu(cpu, holder->mm);
		task_unlock(holder);
	}
	rcu_read_unlock();
}
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/*
	 * Must be called from CPUHP_TEARDOWN_CPU, which means, as we are going
	 * down, that the current state is CPUHP_TEARDOWN_CPU - 1.
	 */
	WARN_ON(st->state != (CPUHP_TEARDOWN_CPU - 1));

	/* Invoke the former CPU_DYING callbacks. DYING must not fail! */
	cpuhp_invoke_callback_range_nofail(false, cpu, st, target);

	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}
/* Park the smpboot threads */
kthread_park(st->thread);
/* * Prevent irq alloc/free while the dying cpu reorganizes the * interrupt affinities.
*/
irq_lock_sparse();
err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu)); if (err) { /* CPU refused to die */
irq_unlock_sparse(); /* Unpark the hotplug thread so we can rollback there */
kthread_unpark(st->thread); return err;
}
BUG_ON(cpu_online(cpu));
/* * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed * all runnable tasks from the CPU, there's only the idle task left now * that the migration thread is done doing the stop_machine thing. * * Wait for the stop thread to go away.
*/
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();
hotplug_cpu__broadcast_tick_pull(cpu); /* This actually kills the CPU. */
__cpu_die(cpu);
/* * Callbacks must be re-integrated right away to the RCU state machine. * Otherwise an RCU callback could block a further teardown function * waiting for its completion.
*/
rcutree_migrate_callbacks(cpu);
BUG_ON(st->state != CPUHP_AP_OFFLINE);
tick_assert_timekeeping_handover();
rcutree_report_cpu_dead();
st->state = CPUHP_AP_IDLE_DEAD; /* * We cannot call complete after rcutree_report_cpu_dead() so we delegate it * to an online cpu.
*/
smp_call_function_single(cpumask_first(cpu_online_mask),
cpuhp_complete_idle_dead, st, 0);
}
staticint cpuhp_down_callbacks(unsignedint cpu, struct cpuhp_cpu_state *st, enum cpuhp_state target)
{ enum cpuhp_state prev_state = st->state; int ret = 0;
ret = cpuhp_invoke_callback_range(false, cpu, st, target); if (ret) {
pr_debug("CPU DOWN failed (%d) CPU %u state %s (%d)\n",
ret, cpu, cpuhp_get_step(st->state)->name,
st->state);
cpuhp_reset_state(cpu, st, prev_state);
if (st->state < prev_state)
WARN_ON(cpuhp_invoke_callback_range(true, cpu, st,
prev_state));
}
return ret;
}
/* Requires cpu_add_remove_lock to be held */ staticint __ref _cpu_down(unsignedint cpu, int tasks_frozen, enum cpuhp_state target)
{ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int prev_state, ret = 0;
if (num_online_cpus() == 1) return -EBUSY;
if (!cpu_present(cpu)) return -EINVAL;
cpus_write_lock();
cpuhp_tasks_frozen = tasks_frozen;
prev_state = cpuhp_set_state(cpu, st, target); /* * If the current CPU state is in the range of the AP hotplug thread, * then we need to kick the thread.
*/ if (st->state > CPUHP_TEARDOWN_CPU) {
st->target = max((int)target, CPUHP_TEARDOWN_CPU);
ret = cpuhp_kick_ap_work(cpu); /* * The AP side has done the error rollback already. Just * return the error code..
*/ if (ret) goto out;
/* * We might have stopped still in the range of the AP hotplug * thread. Nothing to do anymore.
*/ if (st->state > CPUHP_TEARDOWN_CPU) goto out;
st->target = target;
} /* * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need * to do the further cleanups.
*/
ret = cpuhp_down_callbacks(cpu, st, target); if (ret && st->state < prev_state) { if (st->state == CPUHP_TEARDOWN_CPU) {
cpuhp_reset_state(cpu, st, prev_state);
__cpuhp_kick_ap(st);
} else {
WARN(1, "DEAD callback error for CPU%d", cpu);
}
}
/* * If the platform does not support hotplug, report it explicitly to * differentiate it from a transient offlining failure.
*/ if (cpu_hotplug_offline_disabled) return -EOPNOTSUPP; if (cpu_hotplug_disabled) return -EBUSY;
/* * Ensure that the control task does not run on the to be offlined * CPU to prevent a deadlock against cfs_b->period_timer. * Also keep at least one housekeeping cpu onlined to avoid generating * an empty sched_domain span.
*/
for_each_cpu_and(cpu, cpu_online_mask, housekeeping_cpumask(HK_TYPE_DOMAIN)) { if (cpu != work.cpu) return work_on_cpu(cpu, __cpu_down_maps_locked, &work);
} return -EBUSY;
}
staticint cpu_down(unsignedint cpu, enum cpuhp_state target)
{ int err;
/**
 * cpu_device_down - Bring down a cpu device
 * @dev: Pointer to the cpu device to offline
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use remove_cpu() instead.
 *
 * Return: %0 on success or a negative errno code
 */
int cpu_device_down(struct device *dev)
{
	return cpu_down(dev->id, CPUHP_OFFLINE);
}
/* Offline a CPU via the device core, serialized by the device hotplug lock. */
int remove_cpu(unsigned int cpu)
{
	int err;

	lock_device_hotplug();
	err = device_offline(get_cpu_device(cpu));
	unlock_device_hotplug();

	return err;
}
EXPORT_SYMBOL_GPL(remove_cpu);
void smp_shutdown_nonboot_cpus(unsignedint primary_cpu)
{ unsignedint cpu; int error;
cpu_maps_update_begin();
/* * Make certain the cpu I'm about to reboot on is online. * * This is inline to what migrate_to_reboot_cpu() already do.
*/ if (!cpu_online(primary_cpu))
primary_cpu = cpumask_first(cpu_online_mask);
for_each_online_cpu(cpu) { if (cpu == primary_cpu) continue;
error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE); if (error) {
pr_err("Failed to offline CPU%d - error=%d",
cpu, error); break;
}
}
/* * Ensure all but the reboot CPU are offline.
*/
BUG_ON(num_online_cpus() > 1);
/* * Make sure the CPUs won't be enabled by someone else after this * point. Kexec will reboot to a new kernel shortly resetting * everything along the way.
*/
cpu_hotplug_disabled++;
/** * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU * @cpu: cpu that just started * * It must be called by the arch code on the new cpu, before the new cpu * enables interrupts and before the "boot" cpu returns from __cpu_up().
*/ void notify_cpu_starting(unsignedint cpu)
{ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
rcutree_report_cpu_starting(cpu); /* Enables RCU usage on this CPU. */
cpumask_set_cpu(cpu, &cpus_booted_once_mask);
/* * STARTING must not fail!
*/
cpuhp_invoke_callback_range_nofail(true, cpu, st, target);
}
/* * Called from the idle task. Wake up the controlling task which brings the * hotplug thread of the upcoming CPU up and then delegates the rest of the * online bringup to the hotplug thread.
*/ void cpuhp_online_idle(enum cpuhp_state state)
{ struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
/* Happens for the boot cpu */ if (state != CPUHP_AP_ONLINE_IDLE) return;
cpuhp_ap_update_sync_state(SYNC_STATE_ONLINE);
/* * Unpark the stopper thread before we start the idle loop (and start * scheduling); this ensures the stopper task is always available.
*/
stop_machine_unpark(smp_processor_id());
/* Requires cpu_add_remove_lock to be held */ staticint _cpu_up(unsignedint cpu, int tasks_frozen, enum cpuhp_state target)
{ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); struct task_struct *idle; int ret = 0;
cpus_write_lock();
if (!cpu_present(cpu)) {
ret = -EINVAL; goto out;
}
/* * The caller of cpu_up() might have raced with another * caller. Nothing to do.
*/ if (st->state >= target) goto out;
if (st->state == CPUHP_OFFLINE) { /* Let it fail before we try to bring the cpu up */
idle = idle_thread_get(cpu); if (IS_ERR(idle)) {
ret = PTR_ERR(idle); goto out;
}
/* * Reset stale stack state from the last time this CPU was online.
*/
scs_task_reset(idle);
kasan_unpoison_task_stack(idle);
}
cpuhp_tasks_frozen = tasks_frozen;
cpuhp_set_state(cpu, st, target); /* * If the current CPU state is in the range of the AP hotplug thread, * then we need to kick the thread once more.
*/ if (st->state > CPUHP_BRINGUP_CPU) {
ret = cpuhp_kick_ap_work(cpu); /* * The AP side has done the error rollback already. Just * return the error code..
*/ if (ret) goto out;
}
/* * Try to reach the target state. We max out on the BP at * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is * responsible for bringing it up to the target state.
*/
target = min((int)target, CPUHP_BRINGUP_CPU);
ret = cpuhp_up_callbacks(cpu, st, target);
out:
cpus_write_unlock();
arch_smt_update(); return ret;
}
/**
 * cpu_device_up - Bring up a cpu device
 * @dev: Pointer to the cpu device to online
 *
 * This function is meant to be used by device core cpu subsystem only.
 *
 * Other subsystems should use add_cpu() instead.
 *
 * Return: %0 on success or a negative errno code
 */
int cpu_device_up(struct device *dev)
{
	return cpu_up(dev->id, CPUHP_ONLINE);
}
/* Online a CPU via the device core, serialized by the device hotplug lock. */
int add_cpu(unsigned int cpu)
{
	int err;

	lock_device_hotplug();
	err = device_online(get_cpu_device(cpu));
	unlock_device_hotplug();

	return err;
}
EXPORT_SYMBOL_GPL(add_cpu);
/** * bringup_hibernate_cpu - Bring up the CPU that we hibernated on * @sleep_cpu: The cpu we hibernated on and should be brought up. * * On some architectures like arm64, we can hibernate on any CPU, but on * wake up the CPU we hibernated on might be offline as a side effect of * using maxcpus= for example. * * Return: %0 on success or a negative errno code
*/ int bringup_hibernate_cpu(unsignedint sleep_cpu)
{ int ret;
if (!cpu_online(sleep_cpu)) {
pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
ret = cpu_up(sleep_cpu, CPUHP_ONLINE); if (ret) {
pr_err("Failed to bring hibernate-CPU up!\n"); return ret;
}
} return 0;
}
if (cpu_up(cpu, target) && can_rollback_cpu(st)) { /* * If this failed then cpu_up() might have only * rolled back to CPUHP_BP_KICK_AP for the final * online. Clean it up. NOOP if already rolled back.
*/
WARN_ON(cpuhp_invoke_callback_range(false, cpu, st, CPUHP_OFFLINE));
}
/* * On architectures which have enabled parallel bringup this invokes all BP * prepare states for each of the to be onlined APs first. The last state * sends the startup IPI to the APs. The APs proceed through the low level * bringup code in parallel and then wait for the control CPU to release * them one by one for the final onlining procedure. * * This avoids waiting for each AP to respond to the startup IPI in * CPUHP_BRINGUP_CPU.
*/ staticbool __init cpuhp_bringup_cpus_parallel(unsignedint ncpus)
{ conststruct cpumask *mask = cpu_present_mask;
if (__cpuhp_parallel_bringup)
__cpuhp_parallel_bringup = arch_cpuhp_init_parallel_bringup(); if (!__cpuhp_parallel_bringup) returnfalse;
/* * X86 requires to prevent that SMT siblings stopped while * the primary thread does a microcode update for various * reasons. Bring the primary threads up first.
*/
cpumask_and(&tmp_mask, mask, pmask);
cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_BP_KICK_AP);
cpuhp_bringup_mask(&tmp_mask, ncpus, CPUHP_ONLINE); /* Account for the online CPUs */
ncpus -= num_online_cpus(); if (!ncpus) returntrue; /* Create the mask for secondary CPUs */
cpumask_andnot(&tmp_mask, mask, pmask);
mask = &tmp_mask;
}
/* Bring the not-yet started CPUs up */
cpuhp_bringup_mask(mask, ncpus, CPUHP_BP_KICK_AP);
cpuhp_bringup_mask(mask, ncpus, CPUHP_ONLINE); returntrue;
} #else staticinlinebool cpuhp_bringup_cpus_parallel(unsignedint ncpus) { returnfalse; } #endif/* CONFIG_HOTPLUG_PARALLEL */
void __init bringup_nonboot_cpus(unsigned int max_cpus)
{
	/* Nothing to do, or parallel bringup (if enabled) handled it all. */
	if (!max_cpus || cpuhp_bringup_cpus_parallel(max_cpus))
		return;

	/* Full per CPU serialized bringup */
	cpuhp_bringup_mask(cpu_present_mask, max_cpus, CPUHP_ONLINE);
}
int freeze_secondary_cpus(int primary)
{ int cpu, error = 0;
cpu_maps_update_begin(); if (primary == -1) {
primary = cpumask_first(cpu_online_mask); if (!housekeeping_cpu(primary, HK_TYPE_TIMER))
primary = housekeeping_any_cpu(HK_TYPE_TIMER);
} else { if (!cpu_online(primary))
primary = cpumask_first(cpu_online_mask);
}
/* * We take down all of the non-boot CPUs in one shot to avoid races * with the userspace trying to use the CPU hotplug at the same time
*/
cpumask_clear(frozen_cpus);
pr_info("Disabling non-boot CPUs ...\n"); for (cpu = nr_cpu_ids - 1; cpu >= 0; cpu--) { if (!cpu_online(cpu) || cpu == primary) continue;
if (pm_wakeup_pending()) {
pr_info("Wakeup pending. Abort CPU freeze\n");
error = -EBUSY; break;
}
if (!error)
BUG_ON(num_online_cpus() > 1); else
pr_err("Non-boot CPUs are not disabled\n");
/* * Make sure the CPUs won't be enabled by someone else. We need to do * this even in case of failure as all freeze_secondary_cpus() users are * supposed to do thaw_secondary_cpus() on the failure path.
*/
cpu_hotplug_disabled++;
/* * When callbacks for CPU hotplug notifications are being executed, we must * ensure that the state of the system with respect to the tasks being frozen * or not, as reported by the notification, remains unchanged *throughout the * duration* of the execution of the callbacks. * Hence we need to prevent the freezer from racing with regular CPU hotplug. * * This synchronization is implemented by mutually excluding regular CPU * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/ * Hibernate notifications.
*/ staticint
cpu_hotplug_pm_callback(struct notifier_block *nb, unsignedlong action, void *ptr)
{ switch (action) {
case PM_SUSPEND_PREPARE: case PM_HIBERNATION_PREPARE:
cpu_hotplug_disable(); break;
case PM_POST_SUSPEND: case PM_POST_HIBERNATION:
cpu_hotplug_enable(); break;
default: return NOTIFY_DONE;
}
return NOTIFY_OK;
}
static int __init cpu_hotplug_pm_sync_init(void)
{
	/*
	 * cpu_hotplug_pm_callback has higher priority than x86
	 * bsp_pm_callback which depends on cpu_hotplug_pm_callback
	 * to disable cpu hotplug to avoid cpu hotplug race.
	 */
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);
#ifdef CONFIG_HOTPLUG_SPLIT_STARTUP /* * Kicks the AP alive. AP will wait in cpuhp_ap_sync_alive() until * the next step will release it.
*/
[CPUHP_BP_KICK_AP] = {
.name = "cpu:kick_ap",
.startup.single = cpuhp_kick_ap_alive,
},
/* * Waits for the AP to reach cpuhp_ap_sync_alive() and then * releases it for the complete bringup.
*/
[CPUHP_BRINGUP_CPU] = {
.name = "cpu:bringup",
.startup.single = cpuhp_bringup_ap,
.teardown.single = finish_cpu,
.cant_stop = true,
}, #else /* * All-in-one CPU bringup state which includes the kick alive.
*/
[CPUHP_BRINGUP_CPU] = {
.name = "cpu:bringup",
.startup.single = bringup_cpu,
.teardown.single = finish_cpu,
.cant_stop = true,
}, #endif /* Final state before CPU kills itself */
[CPUHP_AP_IDLE_DEAD] = {
.name = "idle:dead",
}, /* * Last state before CPU enters the idle loop to die. Transient state * for synchronization.
*/
[CPUHP_AP_OFFLINE] = {
.name = "ap:offline",
.cant_stop = true,
}, /* First state is scheduler control. Interrupts are disabled */
[CPUHP_AP_SCHED_STARTING] = {
.name = "sched:starting",
.startup.single = sched_cpu_starting,
.teardown.single = sched_cpu_dying,
},
[CPUHP_AP_RCUTREE_DYING] = {
.name = "RCU/tree:dying",
.startup.single = NULL,
.teardown.single = rcutree_dying_cpu,
},
[CPUHP_AP_SMPCFD_DYING] = {
.name = "smpcfd:dying",
.startup.single = NULL,
.teardown.single = smpcfd_dying_cpu,
},
[CPUHP_AP_HRTIMERS_DYING] = {
.name = "hrtimers:dying",
.startup.single = hrtimers_cpu_starting,
.teardown.single = hrtimers_cpu_dying,
},
[CPUHP_AP_TICK_DYING] = {
.name = "tick:dying",
.startup.single = NULL,
.teardown.single = tick_cpu_dying,
}, /* Entry state on starting. Interrupts enabled from here on. Transient
 * state for synchronization */
[CPUHP_AP_ONLINE] = {
.name = "ap:online",
}, /* * Handled on control processor until the plugged processor manages * this itself.
*/
[CPUHP_TEARDOWN_CPU] = {
.name = "cpu:teardown",
.startup.single = NULL,
.teardown.single = takedown_cpu,
.cant_stop = true,
},
#ifdef CONFIG_SMP /* Last state is scheduler control setting the cpu active */
[CPUHP_AP_ACTIVE] = {
.name = "sched:active",
.startup.single = sched_cpu_activate,
.teardown.single = sched_cpu_deactivate,
}, #endif
/* CPU is fully up and running. */
[CPUHP_ONLINE] = {
.name = "online",
.startup.single = NULL,
.teardown.single = NULL,
},
};
/*
 * Sanity check for callbacks: a state is only valid for callback
 * installation when it lies strictly between CPUHP_OFFLINE and
 * CPUHP_ONLINE.
 */
static int cpuhp_cb_check(enum cpuhp_state state)
{
	if (state > CPUHP_OFFLINE && state < CPUHP_ONLINE)
		return 0;
	return -EINVAL;
}
/*
 * Returns a free for dynamic slot assignment of the Online state. The states
 * are protected by the cpuhp_slot_states mutex and an empty slot is identified
 * by having no name assigned.
 */
static int cpuhp_reserve_state(enum cpuhp_state state)
{
	struct cpuhp_step *step;
	enum cpuhp_state end, i;

	/* Only the two dynamic ranges may be scanned for a free slot. */
	if (state == CPUHP_AP_ONLINE_DYN) {
		step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
		end = CPUHP_AP_ONLINE_DYN_END;
	} else if (state == CPUHP_BP_PREPARE_DYN) {
		step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
		end = CPUHP_BP_PREPARE_DYN_END;
	} else {
		return -EINVAL;
	}

	/* An unnamed step marks a free slot. */
	for (i = state; i <= end; i++, step++) {
		if (!step->name)
			return i;
	}

	WARN(1, "No more dynamic states available for CPU hotplug\n");
	return -ENOSPC;
}
staticint cpuhp_store_callbacks(enum cpuhp_state state, constchar *name, int (*startup)(unsignedint cpu), int (*teardown)(unsignedint cpu), bool multi_instance)
{ /* (Un)Install the callbacks for further cpu hotplug operations */ struct cpuhp_step *sp; int ret = 0;
/* * If name is NULL, then the state gets removed. * * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on * the first allocation from these dynamic ranges, so the removal * would trigger a new allocation and clear the wrong (already * empty) state, leaving the callbacks of the to be cleared state * dangling, which causes wreckage on the next hotplug operation.
*/ if (name && (state == CPUHP_AP_ONLINE_DYN ||
state == CPUHP_BP_PREPARE_DYN)) {
ret = cpuhp_reserve_state(state); if (ret < 0) return ret;
state = ret;
}
sp = cpuhp_get_step(state); if (name && sp->name) return -EBUSY;
/*
 * Call the startup/teardown function for a step either on the AP or
 * on the current CPU.
 */
static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
			    struct hlist_node *node)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int ret;

	/*
	 * If there is nothing to do, we are done.
	 * Relies on the union for multi_instance.
	 */
	if (cpuhp_step_empty(bringup, sp))
		return 0;
	/*
	 * The non AP bound callbacks can fail on bringup. On teardown
	 * e.g. module removal we crash for now.
	 */
#ifdef CONFIG_SMP
	/* AP-bound states must run on the target CPU itself. */
	if (cpuhp_is_ap_state(state))
		ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
	else
		ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#endif
	/* Teardown callbacks are not allowed to fail (see comment above). */
	BUG_ON(ret && !bringup);
	return ret;
}
/*
 * Called from __cpuhp_setup_state on a recoverable failure.
 *
 * Note: The teardown callbacks for rollback are not allowed to fail!
 */
static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
				   struct hlist_node *node)
{
	int cpu;

	/* Undo the startup call on every CPU that preceded the failing one. */
	for_each_present_cpu(cpu) {
		struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

		if (cpu >= failedcpu)
			break;

		/* Only CPUs that actually reached @state ran the startup call. */
		if (st->state >= state)
			cpuhp_issue_call(cpu, state, false, node);
	}
}
int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
					  struct hlist_node *node,
					  bool invoke)
{
	struct cpuhp_step *sp = cpuhp_get_step(state);
	int cpu, ret = 0;

	lockdep_assert_cpus_held();

	/* Instances can only be added to multi-instance states. */
	if (!sp->multi_instance)
		return -EINVAL;

	mutex_lock(&cpuhp_state_mutex);

	if (invoke && sp->startup.multi) {
		/*
		 * Try to call the startup callback for each present cpu
		 * depending on the hotplug state of the cpu.
		 */
		for_each_present_cpu(cpu) {
			struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);

			if (st->state < state)
				continue;

			ret = cpuhp_issue_call(cpu, state, true, node);
			if (ret) {
				/* Roll back CPUs already brought up. */
				if (sp->teardown.multi)
					cpuhp_rollback_install(cpu, state, node);
				goto unlock;
			}
		}
	}

	/* All startup calls succeeded (or were skipped): install the node. */
	ret = 0;
	hlist_add_head(node, &sp->list);
unlock:
	mutex_unlock(&cpuhp_state_mutex);
	return ret;
}
int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, bool invoke)
{ int ret;
/** * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state * @state: The state to setup * @name: Name of the step * @invoke: If true, the startup function is invoked for cpus where * cpu state >= @state * @startup: startup callback function * @teardown: teardown callback function * @multi_instance: State is set up for multiple instances which get * added afterwards. * * The caller needs to hold cpus read locked while calling this function. * Return: * On success: * Positive state number if @state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN; * 0 for all other states * On failure: proper (negative) error code
*/ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, constchar *name, bool invoke, int (*startup)(unsignedint cpu), int (*teardown)(unsignedint cpu), bool multi_instance)
{ int cpu, ret = 0; bool dynstate;
lockdep_assert_cpus_held();
if (cpuhp_cb_check(state) || !name) return -EINVAL;
mutex_lock(&cpuhp_state_mutex);
ret = cpuhp_store_callbacks(state, name, startup, teardown,
multi_instance);
dynstate = state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN; if (ret > 0 && dynstate) {
state = ret;
ret = 0;
}
if (ret || !invoke || !startup) goto out;
/* * Try to call the startup callback for each present cpu * depending on the hotplug state of the cpu.
*/
for_each_present_cpu(cpu) { struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int cpustate = st->state;
if (cpustate < state) continue;
ret = cpuhp_issue_call(cpu, state, true, NULL); if (ret) { if (teardown)
cpuhp_rollback_install(cpu, state, NULL);
cpuhp_store_callbacks(state, NULL, NULL, NULL, false); goto out;
}
}
out:
mutex_unlock(&cpuhp_state_mutex); /* * If the requested state is CPUHP_AP_ONLINE_DYN or CPUHP_BP_PREPARE_DYN, * return the dynamically allocated state in case of success.
*/ if (!ret && dynstate) return state; return ret;
}
EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
int __cpuhp_setup_state(enum cpuhp_state state, constchar *name, bool invoke, int (*startup)(unsignedint cpu), int (*teardown)(unsignedint cpu), bool multi_instance)
{ int ret;
int __cpuhp_state_remove_instance(enum cpuhp_state state, struct hlist_node *node, bool invoke)
{ struct cpuhp_step *sp = cpuhp_get_step(state); int cpu;
BUG_ON(cpuhp_cb_check(state));
if (!sp->multi_instance) return -EINVAL;
cpus_read_lock();
mutex_lock(&cpuhp_state_mutex);
if (!invoke || !cpuhp_get_teardown_cb(state)) goto remove; /* * Call the teardown callback for each present cpu depending * on the hotplug state of the cpu. This function is not * allowed to fail currently!
*/
for_each_present_cpu(cpu) { struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int cpustate = st->state;
if (cpustate >= state)
cpuhp_issue_call(cpu, state, false, node);
}
/** * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state * @state: The state to remove * @invoke: If true, the teardown function is invoked for cpus where * cpu state >= @state * * The caller needs to hold cpus read locked while calling this function. * The teardown callback is currently not allowed to fail. Think * about module removal!
*/ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
{ struct cpuhp_step *sp = cpuhp_get_step(state); int cpu;
BUG_ON(cpuhp_cb_check(state));
lockdep_assert_cpus_held();
mutex_lock(&cpuhp_state_mutex); if (sp->multi_instance) {
WARN(!hlist_empty(&sp->list), "Error: Removing state %d which has instances left.\n",
state); goto remove;
}
if (!invoke || !cpuhp_get_teardown_cb(state)) goto remove;
/* * Call the teardown callback for each present cpu depending * on the hotplug state of the cpu. This function is not * allowed to fail currently!
*/
for_each_present_cpu(cpu) { struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int cpustate = st->state;
/*
 * NOTE(review): extraction residue follows — a German website disclaimer
 * ("Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfaeltig zusammengestellt ...") that is not part of the kernel source.
 * It has been commented out so it cannot be mistaken for code. The body of
 * __cpuhp_remove_state_cpuslocked() is truncated at this point; restore it
 * from the upstream kernel/cpu.c.
 */