/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1994 - 2000 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*/ #include <linux/bug.h> #include <linux/init.h> #include <linux/export.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/pagemap.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/memblock.h> #include <linux/highmem.h> #include <linux/swap.h> #include <linux/proc_fs.h> #include <linux/pfn.h> #include <linux/hardirq.h> #include <linux/gfp.h> #include <linux/kcore.h> #include <linux/initrd.h> #include <linux/execmem.h>
/*
 * We have up to 8 empty zeroed pages so we can map one of the right colour
 * when needed. This is necessary only on R4000 / R4400 SC and MC versions
 * where we have to avoid VCED / VECI exceptions for good performance at
 * any price. Since page is never written to after the initialization we
 * don't have to care about aliases on other CPUs.
 */
/* Fixed: garbled "unsignedlong" token restored to "unsigned long". */
unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL_GPL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);
/* * Not static inline because used by IP27 special magic initialization code
 */ staticvoid __init setup_zero_pages(void)
{ unsignedint order;
/*
 * NOTE(review): this block appears corrupted by extraction. The body of
 * setup_zero_pages() is truncated after the "order" declaration, and the
 * lines below (vto/vfrom/kmap_*) look like the interior of a different,
 * cache-alias-aware page-copy routine (copy_user_highpage()-style) that
 * was spliced in. "staticvoid"/"unsignedint"/"unsignedlong" are garbled
 * tokens. Recover the original text before making any code changes here.
 */
/* Map the destination page; if the source is mapped and its dcache is
 * clean, use a coherent mapping so we read through the right alias. */
vto = kmap_atomic(to); if (cpu_has_dc_aliases &&
folio_mapped(src) && !folio_test_dcache_dirty(src)) {
vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom);
kunmap_coherent();
} else {
/* Otherwise a plain atomic mapping of the source is sufficient. */
vfrom = kmap_atomic(from);
copy_page(vto, vfrom);
kunmap_atomic(vfrom);
/* Flush the destination if the icache does not fill from the dcache,
 * or if the kernel alias of the page conflicts with the user alias. */
} if ((!cpu_has_ic_fills_f_dc) ||
pages_do_alias((unsignedlong)vto, vaddr & PAGE_MASK))
flush_data_cache_page((unsignedlong)vto);
kunmap_atomic(vto); /* Make sure this page is cleared on other CPU's too before using it */
smp_wmb();
}
/*
 * NOTE(review): this span is the interior of a MAAR (Memory Accessibility
 * Attribute Register) initialisation routine whose opening lines (function
 * header and local declarations for num_configured, num_maars, used, i,
 * attr, lower, upper, wi, recorded) are not visible in this chunk — the
 * extraction appears to have dropped them. Treat line boundaries here as
 * unreliable.
 */
/* Warn if fewer MAAR pairs exist than memory regions to describe. */
num_configured = maar_config(wi.cfg, wi.num_cfg, num_pairs); if (num_configured < wi.num_cfg)
pr_warn("Not enough MAAR pairs (%u) for all memory regions (%u)\n",
num_pairs, wi.num_cfg);
/* Detect the number of MAARs */
/* Writing all-ones to MAARI and reading it back yields the highest
 * valid index; +1 converts the index to a count. */
write_c0_maari(~0);
back_to_back_c0_hazard();
num_maars = read_c0_maari() + 1;
/* MAARs should be in pairs */
WARN_ON(num_maars % 2);
/* Set MAARs using values we recorded already */ if (recorded.used) {
used = maar_config(recorded.cfgs, recorded.used, num_maars / 2);
BUG_ON(used != recorded.used);
} else { /* Configure the required MAARs */
used = platform_maar_init(num_maars / 2);
}
/* Disable any further MAARs */ for (i = (used * 2); i < num_maars; i++) {
write_c0_maari(i);
back_to_back_c0_hazard();
write_c0_maar(0);
back_to_back_c0_hazard();
}
/* Secondary CPUs replay the recorded setup; only the boot CPU prints. */
if (recorded.used) return;
pr_info("MAAR configuration:\n"); for (i = 0; i < num_maars; i += 2) {
write_c0_maari(i);
back_to_back_c0_hazard();
/* NOTE(review): only "upper" is read here; the reads that populate
 * "attr" and "lower" are not visible in this chunk — presumably lost
 * in extraction. Confirm against the original file. */
upper = read_c0_maar(); #ifdef CONFIG_XPA
upper |= (phys_addr_t)readx_c0_maar() << MIPS_MAARX_ADDR_SHIFT; #endif
pr_info(" [%d]: ", i / 2); if ((attr & MIPS_MAAR_V) != MIPS_MAAR_V) {
pr_cont("disabled\n"); continue;
}
pr_cont("%pa-%pa", &lower, &upper);
if (attr & MIPS_MAAR_S)
pr_cont(" speculate");
pr_cont("\n");
/* Record the setup for use on secondary CPUs */ if (used <= ARRAY_SIZE(recorded.cfgs)) {
recorded.cfgs[recorded.used].lower = lower;
recorded.cfgs[recorded.used].upper = upper;
recorded.cfgs[recorded.used].attrs = attr;
recorded.used++;
}
}
}
/*
 * NOTE(review): two configuration-dependent variants of arch_mm_preinit()
 * follow. The "#else/* CONFIG_NUMA */" below has no visible matching "#if"
 * in this chunk, so the opening of the conditional (presumably
 * "#ifndef CONFIG_NUMA") was lost in extraction — confirm before editing.
 * Several statements are fused onto single lines ("unsignedlong" etc.).
 */
void __init arch_mm_preinit(void)
{ /* * When PFN_PTE_SHIFT is greater than PAGE_SHIFT we won't have enough PTE * bits to hold a full 32b physical address on MIPS32 systems.
*/
BUILD_BUG_ON(IS_ENABLED(CONFIG_32BIT) && (PFN_PTE_SHIFT > PAGE_SHIFT));
/* Expose kernel text in KSEG0 through /proc/kcore on 64-bit kernels. */
#ifdef CONFIG_64BIT if ((unsignedlong) &_text > (unsignedlong) CKSEG0) /* The -4 is a hack so that user tools don't have to handle
the overflow. */
kclist_add(&kcore_kseg0, (void *) CKSEG0,
0x80000000 - 4, KCORE_TEXT); #endif
/* NUMA variant: zero pages are allocated from node 0 only. */
} #else/* CONFIG_NUMA */ void __init arch_mm_preinit(void)
{
setup_zero_pages(); /* This comes from node 0 */
} #endif/* !CONFIG_NUMA */
/*
 * Weak default hook for releasing firmware/bootloader ("PROM") memory.
 * Platforms that reserve such memory override this; the generic version
 * intentionally does nothing.
 */
void __weak __init prom_free_prom_memory(void)
{
	/* No firmware memory to release in the generic case. */
}
/*
 * Free memory that was only needed during boot.
 *
 * First let the platform/firmware layer reclaim anything it reserved,
 * then release the kernel's .init sections. EVA platforms may have used
 * an arbitrary virtual-to-physical mapping for .init, so they supply a
 * dedicated free_init_pages_eva() hook; otherwise the generic helper
 * poisons and frees the pages.
 */
void __ref free_initmem(void)
{
	prom_free_prom_memory();

	if (free_init_pages_eva)
		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
	else
		free_initmem_default(POISON_FREE_INITMEM);
}
/* * Align swapper_pg_dir in to 64K, allows its address to be loaded * with a single LUI instruction in the TLB handlers. If we used * __aligned(64K), its size would get rounded up to the alignment * size, and waste space. So we place it in its own section and align * it in the linker script.
*/
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir"); #ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss; #endif #ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL_GPL(invalid_pmd_table); #endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);
/*
 * NOTE(review): the text below is extraction residue — a German web-page
 * disclaimer accidentally appended to this source file. It is preserved
 * here (translated) as a comment so the file remains compilable; it should
 * simply be removed from the real source:
 *
 * "The information on this web page has been carefully compiled to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed.
 * Note: the coloured syntax highlighting and the measurement are still
 * experimental."
 */