// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Flexible mmap layout support
 *
 * Based on code by Ingo Molnar and Andi Kleen, copyrighted
 * as follows:
 *
 * Copyright 2003-2009 Red Hat Inc.
 * All Rights Reserved.
 * Copyright 2005 Andi Kleen, SUSE Labs.
 * Copyright 2007 Jiri Kosina, SUSE Labs.
 */
staticunsignedlong mmap_base(unsignedlong rnd, unsignedlong task_size, struct rlimit *rlim_stack)
{ unsignedlong gap = rlim_stack->rlim_cur; unsignedlong pad = stack_maxrandom_size(task_size) + stack_guard_gap;
/* Values close to RLIM_INFINITY can overflow. */ if (gap + pad > gap)
gap += pad;
/* * Top of mmap area (just below the process stack). * Leave an at least ~128 MB hole with possible stack randomization.
*/
gap = clamp(gap, SIZE_128M, (task_size / 6) * 5);
/*
 * Pick the mmap base addresses for a new process VM image: the legacy
 * (bottom-up) base is always computed; *base gets either that legacy
 * base or the top-down base, depending on the selected layout.
 */
static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
				unsigned long random_factor, unsigned long task_size,
				struct rlimit *rlim_stack)
{
	*legacy_base = mmap_legacy_base(random_factor, task_size);
	if (mmap_is_legacy())
		*base = *legacy_base;
	else
		*base = mmap_base(random_factor, task_size, rlim_stack);
}
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES /* * The mmap syscall mapping base decision depends solely on the * syscall type (64-bit or compat). This applies for 64bit * applications and 32bit applications. The 64bit syscall uses * mmap_base, the compat syscall uses mmap_compat_base.
*/
arch_pick_mmap_base(&mm->mmap_compat_base, &mm->mmap_compat_legacy_base,
arch_rnd(mmap32_rnd_bits), task_size_32bit(),
rlim_stack); #endif
}
/** * mmap_address_hint_valid - Validate the address hint of mmap * @addr: Address hint * @len: Mapping length * * Check whether @addr and @addr + @len result in a valid mapping. * * On 32bit this only checks whether @addr + @len is <= TASK_SIZE. * * On 64bit with 5-level page tables another sanity check is required * because mappings requested by mmap(@addr, 0) which cross the 47-bit * virtual address boundary can cause the following theoretical issue: * * An application calls mmap(addr, 0), i.e. without MAP_FIXED, where @addr * is below the border of the 47-bit address space and @addr + @len is * above the border. * * With 4-level paging this request succeeds, but the resulting mapping * address will always be within the 47-bit virtual address space, because * the hint address does not result in a valid mapping and is * ignored. Hence applications which are not prepared to handle virtual * addresses above 47-bit work correctly. * * With 5-level paging this request would be granted and result in a * mapping which crosses the border of the 47-bit virtual address * space. If the application cannot handle addresses above 47-bit this * will lead to misbehaviour and hard to diagnose failures. * * Therefore ignore address hints which would result in a mapping crossing * the 47-bit virtual address boundary. * * Note, that in the same scenario with MAP_FIXED the behaviour is * different. The request with @addr < 47-bit and @addr + @len > 47-bit * fails on a 4-level paging machine but succeeds on a 5-level paging * machine. It is reasonable to expect that an application does not rely on * the failure of such a fixed mapping request, so the restriction is not * applied.
*/ bool mmap_address_hint_valid(unsignedlong addr, unsignedlong len)
{ if (TASK_SIZE - len < addr) returnfalse;
/* Can we access it for direct reading/writing? Must be RAM: */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	/* Physical address of the last byte of RAM. */
	phys_addr_t last_ram = __pa(high_memory - 1);

	return addr + count - 1 <= last_ram;
}
/* Can we access it through mmap? Must be a valid physical address: */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
{
	/* Last physical byte the mapping would cover. */
	phys_addr_t last = ((phys_addr_t)pfn << PAGE_SHIFT) + count - 1;

	return phys_addr_valid(last);
}
/*
 * Only allow root to set high MMIO mappings to PROT_NONE.
 * This prevents an unprivileged user from setting them to PROT_NONE and
 * inverting them, then pointing to valid memory for L1TF speculation.
 *
 * Note: for locked down kernels may want to disable the root override.
 */
bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	/* Only relevant on CPUs affected by L1TF. */
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return true;
	/* Only PTEs that would be inverted (PROT_NONE style) are restricted. */
	if (!__pte_needs_invert(pgprot_val(prot)))
		return true;
	/* If it's real memory always allow */
	if (pfn_valid(pfn))
		return true;
	/* High MMIO above the L1TF pfn limit requires CAP_SYS_ADMIN. */
	if (pfn >= l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
		return false;
	return true;
}
/*
 * NOTE(review): the following German text is website boilerplate picked up
 * when this file was scraped (roughly: "the information on this page was
 * compiled carefully to the best of our knowledge, but neither
 * completeness, correctness nor quality is guaranteed; the coloured
 * syntax display and the measurement are still experimental"). It is not
 * part of the source and should be deleted; it is commented out here so
 * the file remains valid C.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */