/*
 * Used to generate warnings if mmu or cpu feature check functions that
 * use static keys are called before the keys are initialized
 * (see setup_feature_keys(), which sets this flag last).
 */
bool static_key_feature_checks_initialized __read_mostly;
EXPORT_SYMBOL_GPL(static_key_feature_checks_initialized);
/*
 * One entry of a feature-fixup table: if (cpu_features & mask) != value,
 * the code section [start_off, end_off) is replaced by the alternative
 * section [alt_start_off, alt_end_off). Offsets are relative (see
 * calc_addr()) to support the VDSO.
 */
struct fixup_entry {
	unsigned long	mask;		/* feature bits this entry tests */
	unsigned long	value;		/* expected value of (features & mask) */
	long		start_off;	/* offset of code to maybe patch */
	long		end_off;
	long		alt_start_off;	/* offset of replacement code */
	long		alt_end_off;
};
/*
 * Convert a fixup-table offset back into an actual code address.
 */
static u32 *calc_addr(struct fixup_entry *fcur, long offset)
{
	/*
	 * We store the offset to the code as a negative offset from
	 * the start of the alt_entry, to support the VDSO. This
	 * routine converts that back into an actual address.
	 */
	return (u32 *)((unsigned long)fcur + offset);
}
void do_stf_barrier_fixups(enum stf_barrier_type types)
{
	/*
	 * The call to the fallback entry flush, and the fallback/sync-ori exit
	 * flush can not be safely patched in/out while other CPUs are
	 * executing them. So call __do_stf_barrier_fixups() on one CPU while
	 * all other CPUs spin in the stop machine core with interrupts hard
	 * disabled.
	 *
	 * The branch to mark interrupt exits non-reentrant is enabled first,
	 * then stop_machine runs which will ensure all CPUs are out of the
	 * low level interrupt exit code before patching. After the patching,
	 * if allowed, then flip the branch to allow fast exits.
	 */
	// Prevent static key update races with do_rfi_flush_fixups()
	mutex_lock(&exit_flush_lock);
	static_branch_enable(&interrupt_exit_not_reentrant);
	/*
	 * NOTE(review): this chunk appears truncated/fused here — the lines
	 * below look like the interior of a different function (an entry-flush
	 * patcher building an instruction sequence in instrs[]); confirm
	 * against the complete file before relying on this block.
	 */
	if (types & L1D_FLUSH_MTTRIG)
		instrs[i++] = PPC_RAW_MTSPR(SPRN_TRIG2, _R0);
	/*
	 * If we're patching in or out the fallback flush we need to be careful
	 * about the order in which we patch instructions. That's because it's
	 * possible we could take a page fault after patching one instruction,
	 * so the sequence of instructions must be safe even in a half patched
	 * state.
	 *
	 * To make that work, when patching in the fallback flush we patch in
	 * this order:
	 * - the mflr		(dest)
	 * - the mtlr		(dest + 2)
	 * - the branch		(dest + 1)
	 *
	 * That ensures the sequence is safe to execute at any point. In
	 * contrast if we patch the mtlr last, it's possible we could return
	 * from the branch and not restore LR, leading to a crash later.
	 *
	 * When patching out the fallback flush (either with nops or another
	 * flush type), we patch in this order:
	 * - the branch		(dest + 1)
	 * - the mtlr		(dest + 2)
	 * - the mflr		(dest)
	 *
	 * Note we are protected by stop_machine() from other CPUs executing
	 * the code in a semi-patched state.
	 */
	start = PTRRELOC(&__start___entry_flush_fixup);
	end = PTRRELOC(&__stop___entry_flush_fixup);
	i = do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK,
				  &entry_flush_fallback);
	start = PTRRELOC(&__start___scv_entry_flush_fixup);
	end = PTRRELOC(&__stop___scv_entry_flush_fixup);
	i += do_patch_entry_fixups(start, end, instrs, types == L1D_FLUSH_FALLBACK,
				   &scv_entry_flush_fallback);
void do_entry_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * The call to the fallback flush can not be safely patched in/out while
	 * other CPUs are executing it. So call __do_entry_flush_fixups() on one
	 * CPU while all other CPUs spin in the stop machine core with interrupts
	 * hard disabled.
	 */
	stop_machine(__do_entry_flush_fixups, &types, NULL);
}
/*
 * Patch the RFI (return-from-interrupt) flush sites for the given flush
 * type. Runs under stop_machine() — see do_rfi_flush_fixups().
 *
 * NOTE(review): `staticint` and `unsignedint` below are extraction damage
 * (fused `static int` / `unsigned int`), and the function body is truncated
 * in this chunk — left byte-identical; restore from the complete file.
 */
staticint __do_rfi_flush_fixups(void *data)
{
	enum l1d_flush_type types = *(enum l1d_flush_type *)data;
	unsignedint instrs[3];
	long *start, *end;
	int i;

	/* Fixup-site table bounds, emitted by the linker script. */
	start = PTRRELOC(&__start___rfi_flush_fixup);
	end = PTRRELOC(&__stop___rfi_flush_fixup);
void do_rfi_flush_fixups(enum l1d_flush_type types)
{
	/*
	 * stop_machine gets all CPUs out of the interrupt exit handler same
	 * as do_stf_barrier_fixups. do_rfi_flush_fixups patching can run
	 * without stop_machine, so this could be achieved with a broadcast
	 * IPI instead, but this matches the stf sequence.
	 */
	// Prevent static key update races with do_stf_barrier_fixups()
	mutex_lock(&exit_flush_lock);
	static_branch_enable(&interrupt_exit_not_reentrant);
	/*
	 * NOTE(review): this chunk appears truncated/fused here — the lines
	 * below look like the interior of a different function (the boot-time
	 * feature-fixup applier); confirm against the complete file.
	 */
	/*
	 * Apply the CPU-specific and firmware specific fixups to kernel text
	 * (nop out sections not relevant to this CPU or this firmware).
	 */
	do_feature_fixups(spec->cpu_features,
			  PTRRELOC(&__start___ftr_fixup),
			  PTRRELOC(&__stop___ftr_fixup));
void __init setup_feature_keys(void)
{
	/*
	 * Initialise jump label. This causes all the cpu/mmu_has_feature()
	 * checks to take on their correct polarity based on the current set of
	 * CPU/MMU features.
	 *
	 * Order matters: jump_label_init() must run before the key inits,
	 * and the "initialized" flag is set last so early-use warnings
	 * (see static_key_feature_checks_initialized) stay accurate.
	 */
	jump_label_init();
	cpu_feature_keys_init();
	mmu_feature_keys_init();
	static_key_feature_checks_initialized = true;
}
/*
 * Late-boot sanity check: the feature masks must not have changed since
 * feature patching ran, because the kernel text was patched for the
 * values saved at that time.
 */
static int __init check_features(void)
{
	WARN(saved_cpu_features != cur_cpu_spec->cpu_features,
	     "CPU features changed after feature patching!\n");
	WARN(saved_mmu_features != cur_cpu_spec->mmu_features,
	     "MMU features changed after feature patching!\n");
#ifdef CONFIG_PPC64
	WARN(saved_firmware_features != powerpc_firmware_features,
	     "Firmware features changed after feature patching!\n");
#endif

	return 0;
}
late_initcall(check_features);
#ifdef CONFIG_FTR_FIXUP_SELFTEST

/*
 * Self-test helper: report (but do not stop on) a failed check.
 * Wrapped in do { } while (0) so it expands to a single statement and is
 * safe inside unbraced if/else bodies (avoids the dangling-else hazard of
 * a bare `if` in a macro).
 */
#define check(x)	\
	do { \
		if (!(x)) \
			printk("feature-fixups: test failed at line %d\n", __LINE__); \
	} while (0)
	/*
	 * NOTE(review): the lines below are the tails of several __init
	 * self-test functions whose headers are missing from this chunk
	 * (they exercise patch_feature_section() against known before/after
	 * byte images). Left byte-identical; confirm against the full file.
	 */
	/* Check we don't patch if the value matches */
	patch_feature_section(8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test1, ftr_fixup_test1_orig, size);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_orig, size) == 0);
	patch_feature_section(~8, &fixup);
	check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}

	/* NOTE(review): start of another truncated test function's tail. */
	/* Check we don't patch if the value matches */
	patch_feature_section(0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test2, ftr_fixup_test2_orig, size);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_orig, size) == 0);
	patch_feature_section(~0xF, &fixup);
	check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}

	/* NOTE(review): start of another truncated test function's tail. */
	/* Check we don't patch if the value matches */
	patch_feature_section(flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);

	/* Check we do patch if the value doesn't match */
	patch_feature_section(0, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);

	/* Check we do patch if the mask doesn't match */
	memcpy(ftr_fixup_test4, ftr_fixup_test4_orig, size);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_orig, size) == 0);
	patch_feature_section(~flag, &fixup);
	check(memcmp(ftr_fixup_test4, ftr_fixup_test4_expected, size) == 0);
}

	/* The fixups have already been done for us during boot */
	check(memcmp(ftr_fixup_test_FW_FTR_macros,
		     ftr_fixup_test_FW_FTR_macros_expected, size) == 0);
#endif
}

	/* The fixups have already been done for us during boot */
	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_LWSYNC, size) == 0);
	} else {
		check(memcmp(lwsync_fixup_test,
			     lwsync_fixup_test_expected_SYNC, size) == 0);
	}
}
/*
 * NOTE(review): the German text below is website boilerplate that leaked
 * into this file during extraction and is not part of the source code.
 * Translation: "The information on this web page was compiled carefully to
 * the best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 *
 * Original: Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */