/* * External debuggers may need to write directly to the text mapping to * install SW breakpoints. Allow this (only) when explicitly requested * with rodata=off.
*/ if (arm64_test_sw_feature_override(ARM64_SW_FEATURE_OVERRIDE_RODATA_OFF))
text_prot = PAGE_KERNEL_EXEC;
/* * We only enable the shadow call stack dynamically if we are running * on a system that does not implement PAC or BTI. PAC and SCS provide * roughly the same level of protection, and BTI relies on the PACIASP * instructions serving as landing pads, preventing us from patching * those instructions into something else.
*/ if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && cpu_has_pac())
enable_scs = false;
if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && cpu_has_bti()) {
enable_scs = false;
/* * If we have a CPU that supports BTI and a kernel built for * BTI then mark the kernel executable text as guarded pages * now so we don't have to rewrite the page tables later.
*/
text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP);
}
/* Map all code read-write on the first pass if needed */
twopass |= enable_scs;
prot = twopass ? data_prot : text_prot;
/* * Unmap the text region before remapping it, to avoid * potential TLB conflicts when creating the contiguous * descriptors.
*/
unmap_segment(init_pg_dir, va_offset, _stext, _etext,
root_level);
dsb(ishst);
isb();
__tlbi(vmalle1);
isb();
/* * Remap these segments with different permissions * No new page table allocations should be needed
*/
map_segment(init_pg_dir, NULL, va_offset, _stext, _etext,
text_prot, true, root_level);
map_segment(init_pg_dir, NULL, va_offset, __inittext_begin,
__inittext_end, text_prot, false, root_level);
}
/* Copy the root page table to its final location */
memcpy((void *)swapper_pg_dir + va_offset, init_pg_dir, PAGE_SIZE);
dsb(ishst);
idmap_cpu_replace_ttbr1(swapper_pg_dir);
}
static void __init remap_idmap_for_lpa2(void)
{ /* clear the bits that change meaning once LPA2 is turned on */
ptdesc_t mask = PTE_SHARED;
/* * We have to clear bits [9:8] in all block or page descriptors in the * initial ID map, as otherwise they will be (mis)interpreted as * physical address bits once we flick the LPA2 switch (TCR.DS). Since * we cannot manipulate live descriptors in that way without creating * potential TLB conflicts, let's create another temporary ID map in a * LPA2 compatible fashion, and update the initial ID map while running * from that.
*/
create_init_idmap(init_pg_dir, mask);
dsb(ishst);
set_ttbr0_for_lpa2((u64)init_pg_dir);
/* * Recreate the initial ID map with the same granularity as before. * Don't bother with the FDT, we no longer need it after this.
*/
memset(init_idmap_pg_dir, 0,
(u64)init_idmap_pg_end - (u64)init_idmap_pg_dir);
if (va_bits > VA_BITS_MIN)
sysreg_clear_set(tcr_el1, TCR_T1SZ_MASK, TCR_T1SZ(va_bits));
/* * The virtual KASLR displacement modulo 2MiB is decided by the * physical placement of the image, as otherwise, we might not be able * to create the early kernel mapping using 2 MiB block descriptors. So * take the low bits of the KASLR offset from the physical address, and * fill in the high bits from the seed.
*/ if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
u64 kaslr_seed = kaslr_early_init(fdt, chosen);
if (kaslr_seed && kaslr_requires_kpti())
arm64_use_ng_mappings = ng_mappings_allowed();
/*
 * NOTE(review): the following German text is website disclaimer boilerplate
 * that was accidentally pasted into this source file; it is not part of the
 * kernel code and should be removed. English translation, preserved for
 * reference: "The information on this web page has been carefully compiled
 * to the best of our knowledge. However, neither completeness, nor
 * correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */