static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED) ranges
 * from unused_sub_pmd_start to next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;
static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}
static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}
static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}
/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
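The helpers above implement a simple marker convention: bytes of a vmemmap PMD page that do not yet back initialized struct pages are filled with PAGE_UNUSED, and a PMD page whose every byte still carries the marker may be freed again. The following is a minimal user-space sketch of that convention, not part of vmem.c; the sketch_* names are invented, the buffer stands in for a 1 MiB vmemmap page, and sketch_memchr_inv() is a local stand-in for the kernel's memchr_inv().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SKETCH_PAGE_UNUSED	0xFD
#define SKETCH_PMD_SIZE		(1UL << 20)	/* 1 MiB, matching an EDAT1 segment */

/* Local stand-in for memchr_inv(): first byte that differs from c, or NULL. */
static void *sketch_memchr_inv(const void *s, int c, size_t n)
{
	const unsigned char *p = s;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] != (unsigned char)c)
			return (void *)(p + i);
	return NULL;
}

int main(void)
{
	unsigned char *pmd_page = malloc(SKETCH_PMD_SIZE);

	if (!pmd_page)
		return 1;
	/* A freshly allocated vmemmap page starts out fully "unused". */
	memset(pmd_page, SKETCH_PAGE_UNUSED, SKETCH_PMD_SIZE);

	/* Using a sub-range clears its marker bytes (here: the first 256 KiB). */
	memset(pmd_page, 0, 256 * 1024);
	printf("fully unused after use:   %s\n",
	       sketch_memchr_inv(pmd_page, SKETCH_PAGE_UNUSED, SKETCH_PMD_SIZE) ? "no" : "yes");

	/* "Unusing" the sub-range restores the marker; now the page could be freed. */
	memset(pmd_page, SKETCH_PAGE_UNUSED, 256 * 1024);
	printf("fully unused after unuse: %s\n",
	       sketch_memchr_inv(pmd_page, SKETCH_PAGE_UNUSED, SKETCH_PMD_SIZE) ? "no" : "yes");

	free(pmd_page);
	return 0;
}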
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}
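try_free_pte_table() uses a common reclaim shape: scan every slot of the lower-level table, bail out as soon as one entry is still in use, and only when all slots are empty free the table and clear the entry that pointed to it. A self-contained sketch of that scan-then-free pattern, assuming a plain array of 256 entries (the s390 page-table size) and invented sketch_* names:

#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PTRS_PER_PTE 256	/* entries per page table, as on s390 */

/* Free the table only if every entry is empty; mirrors the bail-out scan. */
static int sketch_try_free_table(unsigned long **tablep)
{
	unsigned long *table = *tablep;
	int i;

	for (i = 0; i < SKETCH_PTRS_PER_PTE; i++) {
		if (table[i] != 0)
			return 0;	/* still referenced, keep it */
	}
	free(table);
	*tablep = NULL;			/* "pmd_clear": drop the pointer to it */
	return 1;
}

int main(void)
{
	unsigned long *table = calloc(SKETCH_PTRS_PER_PTE, sizeof(*table));

	if (!table)
		return 1;
	table[3] = 0x1000;	/* one live entry blocks freeing */
	printf("freed: %d\n", sketch_try_free_table(&table));
	table[3] = 0;
	printf("freed: %d\n", sketch_try_free_table(&table));
	return 0;
}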
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct,
				  struct vmem_altmap *altmap)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_leaf(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd),
								get_order(PMD_SIZE), altmap);
					pmd_clear(pmd);
					pages++;
				} else if (!direct &&
					   vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd),
							get_order(PMD_SIZE), altmap);
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    cpu_has_edat1() && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && cpu_has_edat1()) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would have also
				 * page tables since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block_buf(PMD_SIZE, NUMA_NO_NODE, altmap);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_leaf(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}
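On the add side, modify_pmd_table() picks the mapping granularity per 1 MiB step: the 1:1 (direct) mapping gets a large segment mapping only when the step is fully PMD-aligned, EDAT1 is available and debug_pagealloc is off, while the vmemmap takes a 1 MiB buffer whenever EDAT1 is available, even for partially used steps, and otherwise falls back to a PTE table. A simplified user-space sketch of just that decision follows; the sketch_* names are invented and the real code's fallback on allocation failure is left out.

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PMD_SIZE		(1UL << 20)
#define SKETCH_IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

enum sketch_map { SKETCH_MAP_1M_LEAF, SKETCH_MAP_PTE_TABLE };

/* Simplified mirror of the "add" decisions in modify_pmd_table(). */
static enum sketch_map sketch_choose(unsigned long addr, unsigned long next,
				     bool direct, bool has_edat1,
				     bool debug_pagealloc)
{
	bool aligned = SKETCH_IS_ALIGNED(addr, SKETCH_PMD_SIZE) &&
		       SKETCH_IS_ALIGNED(next, SKETCH_PMD_SIZE);

	if (direct)	/* 1:1 mapping: large mapping only for a fully covered PMD */
		return (aligned && has_edat1 && !debug_pagealloc) ?
			SKETCH_MAP_1M_LEAF : SKETCH_MAP_PTE_TABLE;
	/* vmemmap: take a 1 MiB frame even for a partially used PMD */
	return has_edat1 ? SKETCH_MAP_1M_LEAF : SKETCH_MAP_PTE_TABLE;
}

static const char *sketch_name(enum sketch_map m)
{
	return m == SKETCH_MAP_1M_LEAF ? "1M segment" : "pte table";
}

int main(void)
{
	/* Partially covered PMD: the direct map must not use a 1 MiB segment ... */
	printf("direct,  [1M..1.5M): %s\n",
	       sketch_name(sketch_choose(0x100000, 0x180000, true, true, false)));
	/* ... but the vmemmap still takes a whole 1 MiB frame for it. */
	printf("vmemmap, [1M..1.5M): %s\n",
	       sketch_name(sketch_choose(0x100000, 0x180000, false, true, false)));
	return 0;
}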
static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	pmd_t *pmd;
	int i;

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER, NULL);
	pud_clear(pud);
}
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct, struct vmem_altmap *altmap)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (WARN_ON_ONCE(end > __abs_lowcore))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct, altmap);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}
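The loop above walks the range in PGDIR-sized steps, and pgd_addr_end() (like its p4d_/pud_/pmd_ counterparts at the lower levels) clamps each step to the end of the range so the final, partial step is never overshot. A small sketch of that boundary arithmetic, using an arbitrary 4 KiB step size and an invented helper name; the kernel macros additionally guard against address wrap-around, which is omitted here.

#include <stdio.h>

/* Next `size` boundary after addr, clamped to end (the *_addr_end idiom). */
static unsigned long sketch_addr_end(unsigned long addr, unsigned long end,
				     unsigned long size)
{
	unsigned long boundary = (addr + size) & ~(size - 1);

	return boundary < end ? boundary : end;
}

int main(void)
{
	unsigned long start = 0x1800, end = 0x4400, size = 0x1000;
	unsigned long addr, next;

	for (addr = start; addr < end; addr = next) {
		next = sketch_addr_end(addr, end, size);
		printf("step: [%#lx, %#lx)\n", addr, next);
	}
	/* Prints [0x1800,0x2000) [0x2000,0x3000) [0x3000,0x4000) [0x4000,0x4400) */
	return 0;
}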
/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	return add_pagetable(start, start + size, true, NULL);
}
/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	remove_pagetable(start, start + size, true, NULL);
}
/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false, altmap);
	if (ret)
		remove_pagetable(start, end, false, altmap);
	mutex_unlock(&vmem_mutex);
	return ret;
}
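vmemmap_populate() keeps its error handling all-or-nothing: under the mutex it tries to add page tables for the whole range, and if that fails part-way it removes whatever was already added before returning the error, so no half-populated range is left behind. A self-contained sketch of that add-then-roll-back idiom, with an invented array of flags standing in for the page tables:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define SKETCH_NR 8

static bool sketch_added[SKETCH_NR];

/* Pretend adding item 5 fails, leaving items 0..4 partially set up. */
static int sketch_add_range(int start, int end)
{
	int i;

	for (i = start; i < end; i++) {
		if (i == 5)
			return -ENOMEM;
		sketch_added[i] = true;
	}
	return 0;
}

static void sketch_remove_range(int start, int end)
{
	int i;

	for (i = start; i < end; i++)
		sketch_added[i] = false;
}

/* The vmemmap_populate() shape: try to add, undo everything on failure. */
static int sketch_populate(int start, int end)
{
	int ret;

	ret = sketch_add_range(start, end);
	if (ret)
		sketch_remove_range(start, end);
	return ret;
}

int main(void)
{
	printf("populate 0..8 -> %d, item 2 left set up: %d\n",
	       sketch_populate(0, SKETCH_NR), sketch_added[2]);
	return 0;
}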
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	int ret;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}
/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along. Meeting a large segment- or region-table entry
 * while traversing is an error, since the function is expected to be
 * called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_leaf(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_leaf(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}
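vmem_get_alloc_pte() is a walk-or-allocate descent: at each level it either follows an existing entry or, when alloc is set, allocates the missing next-level table and hooks it in, giving up as soon as an allocation fails or a large mapping is in the way. Below is a two-level user-space sketch of the same descent; the sketch_* types and the toy index split are invented and only illustrate the shape of the walk.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_ENTRIES 16

struct sketch_leaf { unsigned long pte[SKETCH_ENTRIES]; };
struct sketch_root { struct sketch_leaf *dir[SKETCH_ENTRIES]; };

/* Return the PTE slot for addr; allocate the missing leaf table if allowed. */
static unsigned long *sketch_get_alloc_pte(struct sketch_root *root,
					   unsigned long addr, bool alloc)
{
	unsigned long hi = (addr >> 4) % SKETCH_ENTRIES;
	unsigned long lo = addr % SKETCH_ENTRIES;

	if (!root->dir[hi]) {
		if (!alloc)
			return NULL;		/* walk only: entry is missing */
		root->dir[hi] = calloc(1, sizeof(*root->dir[hi]));
		if (!root->dir[hi])
			return NULL;		/* allocation refused */
	}
	return &root->dir[hi]->pte[lo];
}

int main(void)
{
	struct sketch_root root = { { 0 } };
	unsigned long i;

	printf("lookup w/o alloc: %p\n", (void *)sketch_get_alloc_pte(&root, 0x23, false));
	printf("lookup w/  alloc: %p\n", (void *)sketch_get_alloc_pte(&root, 0x23, true));
	printf("lookup again:     %p\n", (void *)sketch_get_alloc_pte(&root, 0x23, false));
	for (i = 0; i < SKETCH_ENTRIES; i++)
		free(root.dir[i]);
	return 0;
}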
void __init vmem_map_init(void)
{
	__set_memory_rox(_stext, _etext);
	__set_memory_ro(_etext, __end_rodata);
	__set_memory_rox(__stext_amode31, __etext_amode31);
	/*
	 * If the BEAR-enhancement facility is not installed the first
	 * prefix page is used to return to the previous context with
	 * an LPSWE instruction and therefore must be executable.
	 */
	if (!cpu_has_bear())
		set_memory_x(0, 1);
	if (debug_pagealloc_enabled())
		__set_memory_4k(__va(0), absolute_pointer(__va(0)) + ident_map_size);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}