/* * For 32 bit kernels we limit the amount of memory we can * support, in order to preserve enough kernel address space * for other purposes. For 64 bit kernels we don't normally * limit the memory, but this mechanism can be used to * artificially limit the amount of memory (and it is written * to work with multiple memory ranges).
*/
mem_limit_func(); /* check for "mem=" argument */
mem_max = 0; for (i = 0; i < npmem_ranges; i++) { unsignedlong rsize;
/* * We can't use memblock top-down allocations because we only * created the initial mapping up to KERNEL_INITIAL_SIZE in * the assembly bootup code.
*/
memblock_set_bottom_up(true);
/* IOMMU is always used to access "high mem" on those boxes * that can support enough mem that a PCI device couldn't * directly DMA to any physical addresses. * ISA DMA support will need to revisit this.
*/
max_low_pfn = max_pfn;
/* We don't know which region the kernel will be in, so try * all of them.
*/ for (i = 0; i < sysram_resource_count; i++) { struct resource *res = &sysram_resources[i];
request_resource(res, &code_resource);
request_resource(res, &data_resource);
}
request_resource(&sysram_resources[0], &pdcdata_resource);
/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
pdc_pdt_init();
/* Remap kernel text and data, but do not touch init section yet. */
map_pages(init_end, __pa(init_end), kernel_end - init_end,
PAGE_KERNEL, 0);
/* The init text pages are marked R-X. We have to * flush the icache and mark them RW- * * Do a dummy remap of the data section first (the data * section is already PAGE_KERNEL) to pull in the TLB entries
* for map_kernel */
map_pages(init_begin, __pa(init_begin), init_end - init_begin,
PAGE_KERNEL_RWX, 1); /* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
* map_pages */
map_pages(init_begin, __pa(init_begin), init_end - init_begin,
PAGE_KERNEL, 1);
/* force the kernel to see the new TLB entries */
__flush_tlb_range(0, init_begin, kernel_end);
/* finally dump all the instructions which were cached, since the
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);
free_initmem_default(POISON_FREE_INITMEM);
/* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}
kernel_set_to_readonly = true;
map_pages(start, __pa(start), end - start, PAGE_KERNEL, 0);
/* force the kernel to see the new page table entries */
flush_cache_all();
flush_tlb_all();
} #endif
/* * Just an arbitrary offset to serve as a "hole" between mapping areas * (between top of physical memory and a potential pcxl dma mapping * area, and below the vmalloc mapping area). * * The current 32K value just means that there will be a 32K "hole" * between mapping areas. That means that any out-of-bounds memory * accesses will hopefully be caught. The vmalloc() routines leaves * a hole of 4kB between each vmalloced area for the same reason.
*/
/* Leave room for gateway page expansion */ #if KERNEL_MAP_START < GATEWAY_PAGE_SIZE #error KERNEL_MAP_START is in gateway reserved region #endif #define MAP_START (KERNEL_MAP_START)
/* * pagetable_init() sets up the page tables * * Note that gateway_init() places the Linux gateway page at page 0. * Since gateway pages cannot be dereferenced this has the desirable * side effect of trapping those pesky NULL-reference errors in the * kernel.
*/ staticvoid __init pagetable_init(void)
{ int range;
/* Map each physical memory range to its kernel vaddr */
for (range = 0; range < npmem_ranges; range++) { unsignedlong start_paddr; unsignedlong size;
staticvoid __init gateway_init(void)
{ unsignedlong linux_gateway_page_addr; /* FIXME: This is 'const' in order to trick the compiler
into not treating it as DP-relative data. */ externvoid * const linux_gateway_page;
/* * Setup Linux Gateway page. * * The Linux gateway page will reside in kernel space (on virtual * page 0), so it doesn't need to be aliased into user space.
*/
/*
 * paging_init - boot-time bring-up of the kernel's memory management.
 *
 * Runs once during early boot (__init).  The sequence visible here:
 * discover and reserve physical memory (setup_bootmem), build the kernel
 * page tables (pagetable_init), install the gateway page (gateway_init)
 * and fixmap (fixmap_init), put the local cache and TLB into a known
 * state, then hand the memory map to sparsemem and release boot memory
 * to the page allocator.
 * NOTE(review): the ordering looks intentional (page tables before any
 * flush, flushes before sparse_init) — confirm against callers before
 * reordering anything.
 */
void __init paging_init(void)
{
setup_bootmem();
pagetable_init();
gateway_init();
fixmap_init();
flush_cache_all_local(); /* start with known state */
flush_tlb_all_local(NULL);
sparse_init();
parisc_bootmem_free();
}
staticvoid alloc_btlb(unsignedlong start, unsignedlong end, int *slot, unsignedlong entry_info)
{ constint slot_max = btlb_info.fixed_range_info.num_comb; int min_num_pages = btlb_info.min_size; unsignedlong size;
/* map at minimum 4 pages */ if (min_num_pages < 4)
min_num_pages = 4;
size = HUGEPAGE_SIZE; while (start < end && *slot < slot_max && size >= PAGE_SIZE) { /* starting address must have same alignment as size! */ /* if correctly aligned and fits in double size, increase */ if (((start & (2 * size - 1)) == 0) &&
(end - start) >= (2 * size)) {
size <<= 1; continue;
} /* if current size alignment is too big, try smaller size */ if ((start & (size - 1)) != 0) {
size >>= 1; continue;
} if ((end - start) >= size) { if ((size >> PAGE_SHIFT) >= min_num_pages)
pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
size >> PAGE_SHIFT, entry_info, *slot);
(*slot)++;
start += size; continue;
}
size /= 2; continue;
}
}
/*
 * btlb_init_per_cpu - install Block TLB entries for the kernel's static
 * segments on 32-bit PA-RISC CPUs.
 *
 * Queries firmware for the BTLB parameters, sanity-checks that the
 * linker sections abut as expected, then inserts BTLB entries covering
 * the data/bss segment.  No-op on PA2.0 (64-bit) CPUs, which have no
 * BTLB.
 *
 * Fix: 'slot' was passed by address to alloc_btlb() without ever being
 * initialized — reading it there was undefined behavior.  It now starts
 * at the first slot (0).
 * NOTE(review): the comment below mentions code *and* data segments, but
 * only a data-segment alloc_btlb() call is present; an insertion for the
 * text segment may have been lost — confirm against upstream.
 */
void btlb_init_per_cpu(void)
{
	unsigned long s, t, e;
	int slot = 0;	/* first free fixed-range BTLB slot */

	/* BTLBs are not available on 64-bit CPUs */
	if (IS_ENABLED(CONFIG_PA20))
		return;
	else if (pdc_btlb_info(&btlb_info) < 0) {
		/* query failed: zero the info so alloc_btlb() sees no slots */
		memset(&btlb_info, 0, sizeof btlb_info);
	}

	/* insert BTLBs for code and data segments */
	s = (uintptr_t) dereference_function_descriptor(&_stext);
	e = (uintptr_t) dereference_function_descriptor(&_etext);
	t = (uintptr_t) dereference_function_descriptor(&_sdata);
	BUG_ON(t != e);		/* text must end exactly where data begins */

	/* sanity check */
	t = (uintptr_t) dereference_function_descriptor(&_edata);
	e = (uintptr_t) dereference_function_descriptor(&__bss_start);
	BUG_ON(t != e);		/* data must end exactly where bss begins */

	/* data segments */
	s = (uintptr_t) dereference_function_descriptor(&_sdata);
	e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
	alloc_btlb(s, e, &slot, 0x11800000);
}
#ifdef CONFIG_PA20
/* * Currently, all PA20 chips have 18 bit protection IDs, which is the * limiting factor (space ids are 32 bits).
*/
#define NR_SPACE_IDS 262144
#else
/* * Currently we have a one-to-one relationship between space IDs and * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only * support 15 bit protection IDs, so that is the limiting factor. * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's * probably not worth the effort for a special case here.
*/
/* * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is * purged, we can safely reuse the space ids that were released but * not flushed from the tlb.
*/
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.