// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
			return bat_addrs[b].phys + (va - bat_addrs[b].start);
	return 0;
}
/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	int b;

	for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
		if (pa >= bat_addrs[b].phys &&
		    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
			return bat_addrs[b].start + (pa - bat_addrs[b].phys);
	return 0;
}
int __init find_free_bat(void)
{
	int b;
	int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

	for (b = 0; b < n; b++) {
		struct ppc_bat *bat = BATS[b];

		/* Free if the valid bits (Vs/Vp, low two bits of batu) are clear */
		if (!(bat[1].batu & 3))
			return b;
	}
	return -1;
}
/*
 * This function calculates the size of the larger block usable to map the
 * beginning of an area based on the start address and size of that area:
 * - max block size is 256M on 6xx.
 * - base address must be aligned to the block size. So the maximum block size
 *   is identified by the lowest bit set to 1 in the base address (for instance
 *   if base is 0x16000000, max size is 0x02000000).
 * - block size has to be a power of two. This is calculated by finding the
 *   highest bit set to 1.
 */
unsigned int bat_block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = SZ_256M;
	unsigned int base_shift = (ffs(base) - 1) & 31;
	unsigned int block_shift = (fls(top - base) - 1) & 31;

	/* Largest power-of-two block allowed by both the alignment and the size */
	return min3(max_size, 1U << base_shift, 1U << block_shift);
}
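/*
 * Worked example (illustrative values): with base = 0x16000000 and
 * top = 0x20000000, the lowest bit set in base limits the block to
 * 0x02000000 (32M) and the highest bit set in top - base = 0x0a000000
 * limits it to 0x08000000 (128M), so the smallest of 256M, 32M and 128M,
 * i.e. 32M, is returned.
 */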
/*
 * Set up one of the IBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
		    unsigned int size, pgprot_t prot)
{
	unsigned int bl = (size >> 17) - 1;
	int wimgxpp;
	struct ppc_bat *bat = BATS[index];
	unsigned long flags = pgprot_val(prot);

	if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
		flags &= ~_PAGE_COHERENT;
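	/*
	 * Example of the BL encoding above (illustrative): a 128K block gives
	 * bl = (SZ_128K >> 17) - 1 = 0, a 256K block gives bl = 1, and the
	 * maximum 256M block gives bl = 0x7ff, i.e. 2048 naturally aligned
	 * 128K units.
	 */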
	if (debug_pagealloc_enabled_or_kfence()) {
		pr_debug_once("Read-Write memory mapped without BATs\n");
		if (base >= border)
			return base;
		if (top >= border)
			top = border;
	}

	/* No strict RWX, or the range doesn't cross the border: map it in one go */
	if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
		return __mmu_mapin_ram(base, top);

	/* Map up to the border first, then the rest, so no block spans the border */
	done = __mmu_mapin_ram(base, border);
	if (done != border)
		return done;

	return __mmu_mapin_ram(border, top);
}
static bool is_module_segment(unsigned long addr)
{
	if (!IS_ENABLED(CONFIG_EXECMEM))
		return false;
	if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
		return false;
	if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
		return false;
	return true;
}
int mmu_mark_initmem_nx(void)
{
	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
	int i;
	unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
	unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
	unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
	unsigned long size;

	/* Cover the kernel text with as many naturally aligned IBAT blocks as fit */
	for (i = 0; i < nb - 1 && base < top;) {
		size = bat_block_size(base, top);
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}
	if (base < top) {
		size = bat_block_size(base, top);
		if ((top - base) > size) {
			/* Last block: round up so the remaining text is still covered */
			size <<= 1;
			if (strict_kernel_rwx_enabled() && base + size > border)
				pr_warn("Some RW data is getting mapped X. "
					"Adjust CONFIG_DATA_SHIFT to avoid that.\n");
		}
		setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
		base += size;
	}
	/* Invalidate any remaining IBATs */
	for (; i < nb; i++)
		clearibat(i);
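	/*
	 * Illustrative example (made-up sizes): with 7.5M of kernel text the
	 * code above programs naturally aligned IBAT blocks of 4M, 2M, 1M and
	 * 512K and invalidates the rest. If the text did not split exactly,
	 * the last block would be doubled and could then map data beyond the
	 * text as executable, which is what the pr_warn() above flags when
	 * strict kernel RWX is enabled.
	 */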
/*
 * Set up one of the D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
		   unsigned int size, pgprot_t prot)
{
	unsigned int bl;
	int wimgxpp;
	struct ppc_bat *bat;
	unsigned long flags = pgprot_val(prot);

	if (index == -1)
		index = find_free_bat();
	if (index == -1) {
		pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
		       (unsigned long long)phys);
		return;
	}
	bat = BATS[index];
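	/*
	 * For example (hypothetical values): setbat(-1, 0xf0000000, 0xf0000000,
	 * SZ_16M, PAGE_KERNEL_NCG) relies on the index handling above to pick a
	 * free BAT pair and would map 16M of I/O space uncached and guarded.
	 */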
/*
 * Preload a translation in the hash table
 */
static void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;
	pmd = pmd_off(mm, ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void __update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
			pte_t *ptep)
{
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/*
	 * We also avoid filling the hash if not coming from a fault:
	 * 0x300 is a data storage interrupt, 0x400 an instruction storage
	 * interrupt.
	 */
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}
/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
	unsigned int n_hpteg, lg_n_hpteg;

	if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return;

	if (ppc_md.progress)
		ppc_md.progress("hash:enter", 0x105);
	/*
	 * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
	 * This is less than the recommended amount, but then
	 * Linux ain't AIX.
	 */
	n_hpteg = total_memory / (PAGE_SIZE * 8);
	if (n_hpteg < MIN_N_HPTEG)
		n_hpteg = MIN_N_HPTEG;
	lg_n_hpteg = __ilog2(n_hpteg);
	if (n_hpteg & (n_hpteg - 1)) {
		++lg_n_hpteg;	/* round up if not power of 2 */
		n_hpteg = 1 << lg_n_hpteg;
	}
	Hash_size = n_hpteg << LG_HPTEG_SIZE;
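	/*
	 * Worked example (illustrative, assuming the usual 64-byte HPTE groups,
	 * i.e. LG_HPTEG_SIZE = 6): with 256M of RAM and 4K pages there are
	 * 65536 pages, so n_hpteg = 65536 / 8 = 8192 groups, already a power of
	 * two, and Hash_size = 8192 << 6 = 512K, i.e. one 8-byte HPTE per page.
	 */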
	/*
	 * Find some memory for the hash table.
	 */
	if (ppc_md.progress)
		ppc_md.progress("hash:find piece", 0x322);
	Hash = memblock_alloc_or_panic(Hash_size, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;

	pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
		(unsigned long long)(total_memory >> 20), Hash_size >> 10);
	if (ppc_md.progress)
		ppc_md.progress("hash:patch", 0x345);
	if (ppc_md.progress)
		ppc_md.progress("hash:done", 0x205);
/* WARNING: Make sure nothing can trigger a KASAN check past this point */
	/*
	 * Patch up the instructions in hashtable.S:create_hpte
	 */
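	/*
	 * The masks below select instruction fields: 0xffff patches a 16-bit
	 * immediate (the hash table address halves and the hash mask), while
	 * 0x7c0 covers bits 6-10 of the instruction word, which corresponds to
	 * the mask-begin (MB) field of a rotate-and-mask instruction; that is
	 * why hash_mb and hash_mb2 are shifted left by 6 before patching.
	 */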
modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);
	/*
	 * Patch up the instructions in hashtable.S:flush_hash_page
	 */
modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/*
	 * We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* A single BAT can cover at most 256M, so cap the early limit there */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
}