// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CPU Microcode Update Driver for Linux
 *
 * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com>
 *	      2006	Shaohua Li <shaohua.li@intel.com>
 *	 2013-2016	Borislav Petkov <bp@alien8.de>
 *
 * X86 CPU microcode early update for Linux:
 *
 *	Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *			   H Peter Anvin <hpa@zytor.com>
 *		  (C) 2015 Borislav Petkov <bp@alien8.de>
 *
 * This driver allows to upgrade microcode on x86 processors.
 */
/*
 * Synchronization.
 *
 * All non cpu-hotplug-callback call sites use:
 *
 * - cpus_read_lock/unlock() to synchronize with
 *   the cpu-hotplug-callback call sites.
 *
 * We guarantee that only a single cpu is being
 * updated at any particular moment of time.
 */
struct ucode_cpu_info		ucode_cpu_info[NR_CPUS];
/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};
/* NOTE(review): presumably filled in by the early loading path — confirm against the early loader. */
struct early_load_data early_data;
/* * Check the current patch level on this CPU. * * Returns: * - true: if update should stop * - false: otherwise
*/ staticbool amd_check_current_patch_level(void)
{
u32 lvl, dummy, i;
u32 *levels;
if (x86_cpuid_vendor() != X86_VENDOR_AMD) returnfalse;
native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);
levels = final_levels;
for (i = 0; levels[i]; i++) { if (lvl == levels[i]) returntrue;
} returnfalse;
}
bool __init microcode_loader_disabled(void)
{ if (dis_ucode_ldr) returntrue;
/* * Disable when: * * 1) The CPU does not support CPUID. * * 2) Bit 31 in CPUID[1]:ECX is clear * The bit is reserved for hypervisor use. This is still not * completely accurate as XEN PV guests don't see that CPUID bit * set, but that's good enough as they don't land on the BSP * path anyway. * * 3) Certain AMD patch levels are not allowed to be * overwritten.
*/ if (!cpuid_feature() ||
native_cpuid_ecx(1) & BIT(31) ||
amd_check_current_patch_level())
dis_ucode_ldr = true;
/* * Can't use microcode_loader_disabled() here - .init section * hell. It doesn't have to either - the BSP variant must've * parsed cmdline already anyway.
*/ if (dis_ucode_ldr) return;
cpuid_1_eax = native_cpuid_eax(1);
switch (x86_cpuid_vendor()) { case X86_VENDOR_INTEL: if (x86_family(cpuid_1_eax) >= 6)
load_ucode_intel_ap(); break; case X86_VENDOR_AMD: if (x86_family(cpuid_1_eax) >= 0x10)
load_ucode_amd_ap(cpuid_1_eax); break; default: break;
}
}
/* * Fixup the start address: after reserve_initrd() runs, initrd_start * has the virtual address of the beginning of the initrd. It also * possibly relocates the ramdisk. In either case, initrd_start contains * the updated address so use that instead.
*/ if (initrd_start)
start = initrd_start;
staticvoid reload_early_microcode(unsignedint cpu)
{ int vendor, family;
vendor = x86_cpuid_vendor();
family = x86_cpuid_family();
switch (vendor) { case X86_VENDOR_INTEL: if (family >= 6)
reload_ucode_intel(); break; case X86_VENDOR_AMD: if (family >= 0x10)
reload_ucode_amd(cpu); break; default: break;
}
}
/* fake device for request_firmware */
static struct faux_device *microcode_fdev;
#ifdef CONFIG_MICROCODE_LATE_LOADING
/*
 * Late loading dance. Why the heavy-handed stomp_machine effort?
 *
 * - HT siblings must be idle and not execute other code while the other sibling
 *   is loading microcode in order to avoid any negative interactions caused by
 *   the loading.
 *
 * - In addition, microcode update on the cores must be serialized until this
 *   requirement can be relaxed in the future. Right now, this is conservative
 *   and good.
 */
enum sibling_ctrl {
	/* Spinwait with timeout */
	SCTRL_WAIT,
	/* Invoke the microcode_apply() callback */
	SCTRL_APPLY,
	/* Proceed without invoking the microcode_apply() callback */
	SCTRL_DONE,
};
for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { if (!raw_atomic_read(cnt)) returntrue;
for (loops = 0; loops < loops_per_usec; loops++)
cpu_relax();
/* If invoked directly, tickle the NMI watchdog */ if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
instrumentation_begin();
touch_nmi_watchdog();
instrumentation_end();
}
} /* Prevent the late comers from making progress and let them time out */
raw_atomic_inc(cnt); returnfalse;
}
for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) returntrue;
for (loops = 0; loops < loops_per_usec; loops++)
cpu_relax();
/* If invoked directly, tickle the NMI watchdog */ if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) {
instrumentation_begin();
touch_nmi_watchdog();
instrumentation_end();
}
} returnfalse;
}
/*
 * Protected against instrumentation up to the point where the primary
 * thread completed the update. See microcode_nmi_handler() for details.
 */
static noinstr bool load_secondary_wait(unsigned int ctrl_cpu)
{
	/* Initial rendezvous to ensure that all CPUs have arrived */
	if (!wait_for_cpus(&late_cpus_in)) {
		raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
		return false;
	}

	/*
	 * Wait for primary threads to complete. If one of them hangs due
	 * to the update, there is no way out. This is non-recoverable
	 * because the CPU might hold locks or resources and confuse the
	 * scheduler, watchdogs etc. There is no way to safely evacuate the
	 * machine.
	 */
	if (wait_for_ctrl())
		return true;

	/*
	 * NOTE(review): the tail of this function was lost in this chunk;
	 * per the comment above a hung primary is non-recoverable, so
	 * panic — reconstructed, confirm against upstream.
	 */
	instrumentation_begin();
	panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu);
	instrumentation_end();
}
/* * Protected against instrumentation up to the point where the primary * thread completed the update. See microcode_nmi_handler() for details.
*/ static noinstr void load_secondary(unsignedint cpu)
{ unsignedint ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu); enum ucode_state ret;
/* Primary thread completed. Allow to invoke instrumentable code */
instrumentation_begin(); /* * If the primary succeeded then invoke the apply() callback, * otherwise copy the state from the primary thread.
*/ if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY)
ret = microcode_ops->apply_microcode(cpu); else
ret = per_cpu(ucode_ctrl.result, ctrl_cpu);
/* Initial rendezvous to ensure that all CPUs have arrived */ if (!wait_for_cpus(&late_cpus_in)) {
this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT);
pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); return;
}
ret = microcode_ops->apply_microcode(cpu);
this_cpu_write(ucode_ctrl.result, ret);
this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE);
/* * If the update was successful, let the siblings run the apply() * callback. If not, tell them it's done. This also covers the * case where the CPU has uniform loading at package or system * scope implemented but does not advertise it.
*/ if (ret == UCODE_UPDATED || ret == UCODE_OK)
ctrl = SCTRL_APPLY; else
ctrl = SCTRL_DONE;
for_each_cpu(cpu, &cpu_offline_mask) { /* Enable the rendezvous handler and send NMI */
per_cpu(ucode_ctrl.nmi_enabled, cpu) = true;
apic_send_nmi_to_offline_cpu(cpu);
}
/* Wait for them to arrive */ for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) { if (atomic_read(&offline_in_nmi) == nr_offl) returntrue;
udelay(1);
} /* Let the others time out */ returnfalse;
}
/* Kick soft-offlined SMT siblings if required */ if (!cpu && nr_offl)
proceed = kick_offline_cpus(nr_offl);
/* If the soft-offlined CPUs did not respond, abort */ if (proceed)
__load_primary(cpu);
/* Unconditionally release soft-offlined SMT siblings if required */ if (!cpu && nr_offl)
release_offline_cpus();
}
/*
 * Minimal stub rendezvous handler for soft-offlined CPUs which participate
 * in the NMI rendezvous to protect against a concurrent NMI on affected
 * CPUs.
 */
void noinstr microcode_offline_nmi_handler(void)
{
	if (!raw_cpu_read(ucode_ctrl.nmi_enabled))
		return;

	raw_cpu_write(ucode_ctrl.nmi_enabled, false);
	raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE);
	raw_atomic_inc(&offline_in_nmi);
	wait_for_ctrl();
}
static noinstr bool microcode_update_handler(void)
{ unsignedint cpu = raw_smp_processor_id();
/* * Protection against instrumentation is required for CPUs which are not * safe against an NMI which is delivered to the secondary SMT sibling * while the primary thread updates the microcode. Instrumentation can end * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI * which is the opposite of what the NMI rendezvous is trying to achieve. * * The primary thread is safe versus instrumentation as the actual * microcode update handles this correctly. It's only the sibling code * path which must be NMI safe until the primary thread completed the * update.
*/ bool noinstr microcode_nmi_handler(void)
{ if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) returnfalse;
/* * Take a snapshot before the microcode update in order to compare and * check whether any bits changed after an update.
*/
store_cpu_caps(&prev_info);
if (microcode_ops->use_nmi)
static_branch_enable_cpuslocked(µcode_nmi_handler_enable);
/* * This function does two things: * * 1) Ensure that all required CPUs which are present and have been booted * once are online. * * To pass this check, all primary threads must be online. * * If the microcode load is not safe against NMI then all SMT threads * must be online as well because they still react to NMIs when they are * soft-offlined and parked in one of the play_dead() variants. So if a * NMI hits while the primary thread updates the microcode the resulting * behaviour is undefined. The default play_dead() implementation on * modern CPUs uses MWAIT, which is also not guaranteed to be safe * against a microcode update which affects MWAIT. * * As soft-offlined CPUs still react on NMIs, the SMT sibling * restriction can be lifted when the vendor driver signals to use NMI * for rendezvous and the APIC provides a mechanism to send an NMI to a * soft-offlined CPU. The soft-offlined CPUs are then able to * participate in the rendezvous in a trivial stub handler. * * 2) Initialize the per CPU control structure and create a cpumask * which contains "offline"; secondary threads, so they can be handled * correctly by a control CPU.
*/ staticbool setup_cpus(void)
{ struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, }; bool allow_smt_offline; unsignedint cpu;
for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { /* * Offline CPUs sit in one of the play_dead() functions * with interrupts disabled, but they still react on NMIs * and execute arbitrary code. Also MWAIT being updated * while the offline CPU sits there is not necessarily safe * on all CPU variants. * * Mark them in the offline_cpus mask which will be handled * by CPU0 later in the update process. * * Ensure that the primary thread is online so that it is * guaranteed that all cores are updated.
*/ if (!cpu_online(cpu)) { if (topology_is_primary_thread(cpu) || !allow_smt_offline) {
pr_err("CPU %u not online, loading aborted\n", cpu); returnfalse;
}
cpumask_set_cpu(cpu, &cpu_offline_mask);
per_cpu(ucode_ctrl, cpu) = ctrl; continue;
}
/* * Initialize the per CPU state. This is core scope for now, * but prepared to take package or system scope into account.
*/
ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu));
per_cpu(ucode_ctrl, cpu) = ctrl;
} returntrue;
}
staticint load_late_locked(void)
{ if (!setup_cpus()) return -EBUSY;
switch (microcode_ops->request_microcode_fw(0, µcode_fdev->dev)) { case UCODE_NEW: return load_late_stop_cpus(false); case UCODE_NEW_SAFE: return load_late_stop_cpus(true); case UCODE_NFOUND: return -ENOENT; case UCODE_OK: return 0; default: return -EBADFD;
}
}
if (c->x86_vendor == X86_VENDOR_INTEL)
microcode_ops = init_intel_microcode(); elseif (c->x86_vendor == X86_VENDOR_AMD)
microcode_ops = init_amd_microcode(); else
pr_err("no support for this CPU vendor\n");
/*
 * NOTE(review): the following text is extraction residue from a German web
 * page and is not part of this driver. Preserved here in English translation:
 *
 * The information on this web page was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 *
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental.
 */