// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */
/* * Creates a middle page table and puts a pointer to it in the * given global directory entry. This only returns the gd entry * in non-PAE compilation mode, since the middle layer is folded.
*/ static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd_table;
/* * Create a page table and place a pointer to it in a middle page * directory entry:
*/ static pte_t * __init one_page_table_init(pmd_t *pmd)
{ if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
pte_t *page_table = (pte_t *)alloc_low_page();
static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, unsignedlong vaddr, pte_t *lastpte, void **adr)
{ #ifdef CONFIG_HIGHMEM /* * Something (early fixmap) may already have put a pte * page here, which causes the page table allocation * to become nonlinear. Attempt to fix it, and if it * is still nonlinear then we have to bug.
*/ int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
/* * This function initializes a certain range of kernel virtual memory * with new bootmem page tables, everywhere page tables are missing in * the given range. * * NOTE: The pagetables are allocated contiguous on the physical space * so we can cache the place of the first one and move around without * checking the pgd every time.
*/ staticvoid __init
page_table_range_init(unsignedlong start, unsignedlong end, pgd_t *pgd_base)
{ int pgd_idx, pmd_idx; unsignedlong vaddr;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte = NULL; unsignedlong count = page_table_range_init_count(start, end); void *adr = NULL;
/* * This maps the physical memory to kernel virtual address space, a total * of max_low_pfn pages, by creating page tables starting from address * PAGE_OFFSET:
*/ unsignedlong __init
kernel_physical_mapping_init(unsignedlong start, unsignedlong end, unsignedlong page_size_mask,
pgprot_t prot)
{ int use_pse = page_size_mask == (1<<PG_LEVEL_2M); unsignedlong last_map_addr = end; unsignedlong start_pfn, end_pfn;
pgd_t *pgd_base = swapper_pg_dir; int pgd_idx, pmd_idx, pte_ofs; unsignedlong pfn;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte; unsigned pages_2m, pages_4k; int mapping_iter;
start_pfn = start >> PAGE_SHIFT;
end_pfn = end >> PAGE_SHIFT;
/* * First iteration will setup identity mapping using large/small pages * based on use_pse, with other attributes same as set by * the early code in head_32.S * * Second iteration will setup the appropriate attributes (NX, GLOBAL..) * as desired for the kernel identity mapping. * * This two pass mechanism conforms to the TLB app note which says: * * "Software should not write to a paging-structure entry in a way * that would change, for any linear address, both the page size * and either the page frame or attributes."
*/
mapping_iter = 1;
/* * Map with big pages if possible, otherwise * create normal page tables:
*/ if (use_pse) { unsignedint addr2;
pgprot_t prot = PAGE_KERNEL_LARGE; /* * first pass will use the same initial * identity mapping attribute + _PAGE_PSE.
*/
pgprot_t init_prot =
__pgprot(PTE_IDENT_ATTR |
_PAGE_PSE);
/* * sync back low identity map too. It is used for example * in the 32-bit EFI stub.
*/
clone_pgd_range(initial_page_table,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
}
/* * Remove any mappings which extend past the end of physical * memory from the boot time page table. * In virtual address space, we should have at least two pages * from VMALLOC_END to pkmap or fixmap according to VMALLOC_END * definition. And max_low_pfn is set to VMALLOC_END physical * address. If initial memory mapping is doing right job, we * should have pte used near max_low_pfn or one pmd is not present.
*/ for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
pgd = base + pgd_index(va); if (!pgd_present(*pgd)) break;
/* should not be large page here */ if (pmd_leaf(*pmd)) {
pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
pfn, pmd, __pa(pmd));
BUG_ON(1);
}
pte = pte_offset_kernel(pmd, va); if (!pte_present(*pte)) break;
/* * Build a proper pagetable for the kernel mappings. Up until this * point, we've been running on some set of pagetables constructed by * the boot process. * * This will be a pagetable constructed in arch/x86/kernel/head_32.S. * The root of the pagetable will be swapper_pg_dir. * * In general, pagetable_init() assumes that the pagetable may already * be partially populated, and so it avoids stomping on any existing * mappings.
*/ void __init early_ioremap_page_table_range_init(void)
{
pgd_t *pgd_base = swapper_pg_dir; unsignedlong vaddr, end;
/* * Fixed mappings, only the page table structure has to be * created - mappings will be set by set_fixmap():
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
page_table_range_init(vaddr, end, pgd_base);
early_ioremap_reset();
}
#define DEFAULT_PTE_MASK ~(_PAGE_NX | _PAGE_GLOBAL)

/* Bits supported by the hardware: */
pteval_t __supported_pte_mask __read_mostly = DEFAULT_PTE_MASK;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* Bits allowed in normal kernel mappings: */
pteval_t __default_kernel_pte_mask __read_mostly = DEFAULT_PTE_MASK;
/* Used in PAGE_KERNEL_* macros which are reasonably used out-of-tree: */
EXPORT_SYMBOL(__default_kernel_pte_mask);
/* * highmem=size forces highmem to be exactly 'size' bytes. * This works even on boxes that have no highmem otherwise. * This also works to reduce highmem size on bigger boxes.
*/ staticint __init parse_highmem(char *arg)
{ if (!arg) return -EINVAL;
#define MSG_HIGHMEM_TOO_BIG \ "highmem size (%luMB) is bigger than pages available (%luMB)!\n"
#define MSG_LOWMEM_TOO_SMALL \ "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n" /* * All of RAM fits into lowmem - but if user wants highmem * artificially via the highmem=x boot parameter then create * it:
*/ staticvoid __init lowmem_pfn_init(void)
{ /* max_low_pfn is 0, we already have early_res support */
max_low_pfn = max_pfn;
if (highmem_pages == -1)
highmem_pages = 0; #ifdef CONFIG_HIGHMEM if (highmem_pages >= max_pfn) {
printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
highmem_pages = 0;
} if (highmem_pages) { if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
pages_to_mb(highmem_pages));
highmem_pages = 0;
}
max_low_pfn -= highmem_pages;
} #else if (highmem_pages)
printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n"); #endif
}
#define MSG_HIGHMEM_TRIMMED \ "Warning: only 4GB will be used. Support for CONFIG_HIGHMEM64G was removed!\n" /* * We have more RAM than fits into lowmem - we try to put it into * highmem, also taking the highmem=x boot parameter into account:
*/ staticvoid __init highmem_pfn_init(void)
{
max_low_pfn = MAXMEM_PFN;
if (highmem_pages == -1)
highmem_pages = max_pfn - MAXMEM_PFN;
/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routines also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
	pagetable_init();

	__flush_tlb_all();

	/*
	 * NOTE: at this point the bootmem allocator is fully available.
	 */
	olpc_dt_build_devicetree();
	sparse_init();
	zone_sizes_init();
}
/* * Test if the WP bit works in supervisor mode. It isn't supported on 386's * and also on some strange 486's. All 586+'s are OK. This used to involve * black magic jumps to work around some nasty CPU bugs, but fortunately the * switch to using exceptions got rid of all that.
*/ staticvoid __init test_wp_bit(void)
{ char z = 0;
printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode...");
staticvoid mark_nxdata_nx(void)
{ /* * When this called, init has already been executed and released, * so everything past _etext should be NX.
*/ unsignedlong start = PFN_ALIGN(_etext); /* * This comes from is_x86_32_kernel_text upper limit. Also HPAGE where used:
*/ unsignedlong size = (((unsignedlong)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;
if (__supported_pte_mask & _PAGE_NX)
printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
set_memory_nx(start, size >> PAGE_SHIFT);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.