#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
 */
int __meminit vmemmap_create_mapping(unsigned long start,
				     unsigned long page_size,
				     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].shift - 10 > 0xf);

	/* Encode the size in the PTE */
	flags |= (mmu_psize_defs[mmu_vmemmap_psize].shift - 10) << 8;

	/*
	 * For each PTE for that area, map things. Note that we don't
	 * increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, __pgprot(flags)));

	/*
	 * Success. (A stray "if (!ptr) panic(...); return ptr;" fragment
	 * from an unrelated allocation error path had been spliced in
	 * here; "ptr" was never declared and an int function must not
	 * return a pointer.)
	 */
	return 0;
}
/*
 * map_kernel_page currently only called by __ioremap
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 *
 * NOTE(review): "unsignedlong" below is a text-corruption of
 * "unsigned long" and will not compile; it is left untouched here
 * because this edit changes comments only. TODO: restore the space.
 */
int __ref map_kernel_page(unsignedlong ea, phys_addr_t pa, pgprot_t prot)
{
	/* One walk pointer per page-table level: pgd -> p4d -> pud -> pmd -> pte. */
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	/*
	 * NOTE(review): the comment that originally followed here described
	 * exception-vector patching ("Our exceptions vectors start with a
	 * NOP and -then- a branch ...") and is unrelated to page-table
	 * setup — it appears spliced in from another file. The remainder
	 * of this function's body (the page-table walk/allocation and the
	 * set_pte_at() call, plus the closing brace) is missing from this
	 * copy and must be restored from the authoritative source.
	 */
/*
 * NOTE(review): extraneous non-code text, apparently pasted from a German
 * website disclaimer; translated for reference: "The information on this
 * website has been carefully compiled to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * provided information is guaranteed. Note: the colored syntax
 * highlighting and the measurement are still experimental."
 * This text does not belong in this source file and should be removed.
 */