// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory subsystem initialization for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */
/*
 * Define a startpg just past the end of the kernel image and a lastpg
 * that corresponds to the end of real or simulated platform memory.
 */
#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))

unsigned long bootmem_lastpg;	/* Should be set by platform code */
unsigned long __phys_offset;	/* physical kernel offset >> 12 */

/* Set as variable to limit PMD copies */
int max_kernel_seg = 0x303;

/* indicate pfn's of high memory */
unsigned long highstart_pfn, highend_pfn;

/* Default cache attribute for newly created page tables */
unsigned long _dflt_cache_att = CACHEDEF;

/*
 * The current "generation" of kernel map, which should not roll
 * over until Hell freezes over.  Actual bound in years needs to be
 * calculated to confirm.
 */
DEFINE_SPINLOCK(kmap_gen_lock);

/* checkpatch says don't init this to 0. */
unsigned long long kmap_generation;
/*
 * In order to set up page allocator "nodes",
 * somebody has to call free_area_init() for UMA.
 *
 * In this mode, we only have one pg_data_t
 * structure: contig_mem_data.
 */
static void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };

	/*
	 * This is not particularly well documented anywhere, but
	 * give ZONE_NORMAL all the memory, including the big holes
	 * left by the kernel+bootmem_map which are already left as reserved
	 * in the bootmem_map; free_area_init should see those bits and
	 * adjust accordingly.
	 */
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfn);  /* sets up the zonelists and mem_map */

	/*
	 * Set the init_mm descriptors "context" value to point to the
	 * initial kernel segment table's physical address.
	 */
	init_mm.context.ptbase = __pa(init_mm.pgd);
}
/*
 * Pick out the memory size.  We look for mem=size,
 * where size is "size[KkMm]".
 *
 * NOTE(review): this body appears to be a garbled merge of two upstream
 * functions: the early_mem() "mem=" parameter parser and the segment-table
 * setup normally found in setup_arch_memory().  The locals `size`/`endp`
 * are declared but never used here, and `segtable`/`segtable_end` are used
 * with no visible declaration — the code between them (the memparse() call
 * and the segtable initialization) seems to be missing from this extract.
 * Verify against the complete original file before relying on this.
 */
static int __init early_mem(char *p)
{
	unsigned long size;	/* NOTE(review): unused here — presumably fed by a missing memparse(p, &endp) */
	char *endp;		/* NOTE(review): unused here — see above */

	/*
	 * The default VM page tables (will be) populated with
	 * VA=PA+PAGE_OFFSET mapping.  We go in and invalidate entries
	 * higher than what we have memory for.
	 */

	/* this is pointer arithmetic; each entry covers 4MB */
	segtable = segtable + (PAGE_OFFSET >> 22);

	/* this actually only goes to the end of the first gig */
	segtable_end = segtable + (1 << (30 - 22));

	/*
	 * Move forward to the start of empty pages; take into account
	 * phys_offset shift.
	 */
	segtable += (bootmem_lastpg - ARCH_PFN_OFFSET) >> (22 - PAGE_SHIFT);
	{
		int i;

		/* Mark the trailing DMA_RESERVE 4MB entries uncached */
		for (i = 1; i <= DMA_RESERVE; i++)
			segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
				| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
				| __HEXAGON_C_UNC << 6
				| __HVM_PDE_S_4MB);
	}

	printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
		segtable_end);
	while (segtable < (segtable_end - 8))
		*(segtable++) = __HVM_PDE_S_INVALID;

	/* stop the pointer at the device I/O 4MB page */

	printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
		segtable);

#if 0
	/* Other half of the early device table from vm_init_segtable. */
	printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
		(unsigned long) _K_init_devicetable - PAGE_OFFSET);
	*segtable = ((u32) (unsigned long) _K_init_devicetable - PAGE_OFFSET) |
		__HVM_PDE_S_4KB;
	printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
#endif

	/*
	 * The bootmem allocator seemingly just lives to feed memory
	 * to the paging system
	 */
	printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
	paging_init();	/* See Gorman Book, 2.3 */

	/*
	 * At this point, the page allocator is kind of initialized, but
	 * apparently no pages are available (just like with the bootmem
	 * allocator), and need to be freed themselves via mem_init(),
	 * which is called by start_kernel() later on in the process
	 */

	/*
	 * Falling off the end of a non-void function whose return value is
	 * used is undefined behavior; early_param handlers return 0 on
	 * success, so do that explicitly.
	 */
	return 0;
}
/*
 * [Extraction residue — not part of the original source file; remove.]
 * Translated from German: "The information on this web page was compiled
 * carefully to the best of our knowledge.  However, neither completeness,
 * nor correctness, nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */