// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */
/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region that
 * defines the region extents, its attributes and NUMA node id on NUMA
 * systems. Every memory type is described by the struct memblock_type
 * which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_MEMORY_REGIONS for "memory" and
 * %INIT_MEMBLOCK_RESERVED_REGIONS for "reserved". The region array
 * for "physmem" is initially sized to %INIT_PHYSMEM_REGIONS.
 * The memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() performs such an assignment directly.
 *
 * Once memblock is set up the memory can be allocated using one of the
 * API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid()
 * functions for a more elaborate description.
 *
 * As the system boot progresses, the architecture specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
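/*
 * Illustrative sketch (not part of the original source): a typical
 * early setup path registers the physical memory layout, reserves the
 * ranges that must survive, and then allocates from what is left.
 * The addresses, sizes and the initrd_* variables are hypothetical.
 */
#if 0
static void __init example_early_mm_setup(void)
{
	void *buf;

	/* describe 1G of RAM starting at 64M to memblock */
	memblock_add(SZ_64M, SZ_1G);

	/* keep the (hypothetical) initrd area away from the allocator */
	memblock_reserve(initrd_start_phys, initrd_size);

	/* grab a zeroed, cache-line aligned scratch buffer */
	buf = memblock_alloc(SZ_4K, SMP_CACHE_BYTES);
	if (!buf)
		panic("%s: scratch allocation failed\n", __func__);
}
#endif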
#ifdef CONFIG_MEMBLOCK_KHO_SCRATCH
/* When set to true, only allocate from MEMBLOCK_KHO_SCRATCH ranges */
static bool kho_scratch_only;
#else
#define kho_scratch_only false
#endif
/*
 * keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard()
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;
#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])
#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)
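/*
 * Example (illustrative only, not part of the original source): the
 * iterator and debug helper above are typically combined like this to
 * dump every reserved region when memblock debugging is enabled.
 */
#if 0
static void __init_memblock example_dump_reserved(void)
{
	struct memblock_region *rgn;
	unsigned long i;

	for_each_memblock_type(i, (&memblock.reserved), rgn)
		memblock_dbg("reserved[%lu]: [%pa-0x%llx]\n", i,
			     &rgn->base,
			     (unsigned long long)(rgn->base + rgn->size - 1));
}
#endif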
bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			return true;
	return false;
}
/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align, int nid, enum memblock_flags flags)
{
phys_addr_t this_start, this_end, cand;
u64 i;
/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align, int nid, enum memblock_flags flags)
{
phys_addr_t this_start, this_end, cand;
u64 i;
/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
phys_addr_t align, phys_addr_t start,
phys_addr_t end, int nid, enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_NOLEAKTRACE)
end = memblock.current_limit;
/* avoid allocating the first page */
start = max_t(phys_addr_t, start, PAGE_SIZE);
end = max(start, end);
/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
phys_addr_t end, phys_addr_t size,
phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();
again:
ret = memblock_find_in_range_node(size, align, start, end,
NUMA_NO_NODE, flags);
if (!ret && (flags & MEMBLOCK_MIRROR)) {
pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}
/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						 phys_addr_t new_area_start,
						 phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/*
	 * We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
panic("memblock: cannot resize %s array\n", type->name);
	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);
	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
new_array = kmalloc(new_size, GFP_KERNEL);
addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		if (addr) {
			/* The memory may not have been accepted, yet. */
accept_memory(addr, new_alloc_size);
new_array = __va(addr);
} else {
new_array = NULL;
}
	}

	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}
new_end = addr + new_size - 1;
memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
type->name, type->max * 2, &addr, &new_end);
	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
memcpy(new_array, type->regions, old_size);
memset(new_array + type->max, 0, old_size);
old_array = type->regions;
type->regions = new_array;
type->max <<= 1;
	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(old_array, old_alloc_size);
	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise,
	 * we needn't do it
	 */
	if (!use_slab)
BUG_ON(memblock_reserve_kern(addr, new_alloc_size));
/* Update slab flag */
*in_slab = use_slab;
return 0;
}
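/*
 * Illustrative sketch (not part of the original source): an
 * architecture normally opts in to the doubling implemented above only
 * once every range that must not be clobbered (initrd, crash kernel,
 * ...) has been reserved, typically late in setup_arch().
 */
#if 0
void __init example_enable_resize(void)
{
	/* all critical reservations are in place, growing is safe now */
	memblock_allow_resize();
}
#endif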
/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 * @start_rgn: start scanning from (@start_rgn - 1)
 * @end_rgn: end scanning at (@end_rgn - 1)
 *
 * Scan @type and merge neighboring compatible regions in
 * [@start_rgn - 1, @end_rgn)
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type,
						   unsigned long start_rgn,
						   unsigned long end_rgn)
{
	int i = 0;

	if (start_rgn)
		i = start_rgn - 1;
	end_rgn = min(end_rgn, type->cnt - 1);
	while (i < end_rgn) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}
		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
type->cnt--;
end_rgn--;
}
}
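/*
 * Example (illustrative only, not part of the original source): two
 * back-to-back memblock_add() calls with compatible attributes end up
 * as a single region because memblock_add_range() invokes the merge
 * helper above.
 */
#if 0
static void __init example_merge(void)
{
	memblock_add(0x1000000, SZ_16M);	/* [16M, 32M) */
	memblock_add(0x2000000, SZ_16M);	/* [32M, 48M) */
	/* memblock.memory now holds one region: [16M, 48M) */
}
#endif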
/**
 * memblock_insert_region - insert new memblock region
 * @type: memblock type to insert into
 * @idx: index for the insertion point
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: node id of the new region
 * @flags: flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size, int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];
/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
					      phys_addr_t base, phys_addr_t size,
					      int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new, start_rgn = -1, end_rgn;
	struct memblock_region *rgn;

	if (!size)
		return 0;
	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 0 || type->total_size);
type->regions[0].base = base;
type->regions[0].size = size;
type->regions[0].flags = flags;
memblock_set_region_node(&type->regions[0], nid);
type->total_size = size;
		type->cnt = 1;
		return 0;
}
	/*
	 * The worst case is when new range overlaps all existing regions,
	 * then we'll need type->cnt + 1 empty regions in @type. So if
	 * type->cnt * 2 + 1 is less than or equal to type->max, we know
	 * that there is enough empty regions in @type, and we can insert
	 * regions directly.
	 */
	if (type->cnt * 2 + 1 <= type->max)
		insert = true;

repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
base = obase;
nr_new = 0;
	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NUMA
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
WARN_ON(flags != MEMBLOCK_NONE && flags != rgn->flags);
			nr_new++;
			if (insert) {
				if (start_rgn == -1)
start_rgn = idx;
end_rgn = idx + 1;
memblock_insert_region(type, idx++, base,
rbase - base, nid,
flags);
}
		}
		/* area below @rend is dealt with, forget about it */
base = min(rend, end);
}
	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert) {
			if (start_rgn == -1)
start_rgn = idx;
end_rgn = idx + 1;
memblock_insert_region(type, idx, base, end - base,
nid, flags);
}
}
	if (!nr_new)
		return 0;
	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
} else {
		memblock_merge_regions(type, start_rgn, end_rgn);
		return 0;
}
}
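/*
 * Example (illustrative only, not part of the original source):
 * overlapping additions are legal; the second call below only extends
 * the existing region rather than corrupting it.
 */
#if 0
static void __init example_overlap_add(void)
{
	memblock_add(SZ_128M, SZ_256M);	/* [128M, 384M) */
	memblock_add(SZ_256M, SZ_256M);	/* overlaps; result: [128M, 512M) */
}
#endif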
/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				      int nid, enum memblock_flags flags)
{
phys_addr_t end = base + size - 1;
/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
phys_addr_t end = base + size - 1;
/**
 * memblock_validate_numa_coverage - check if amount of memory with
 * no node ID assigned is less than a threshold
 * @threshold_bytes: maximal memory size that can have unassigned node
 * ID (in bytes).
 *
 * A buggy firmware may report memory that does not belong to any node.
 * Check if amount of such memory is below @threshold_bytes.
 *
 * Return: true on success, false on failure.
 */
bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
{
	unsigned long nr_pages = 0;
	unsigned long start_pfn, end_pfn, mem_size_mb;
	int nid, i;

	/* count pages that do not have a valid node ID assigned */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (!numa_valid_node(nid))
			nr_pages += end_pfn - start_pfn;
	}
if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
mem_size_mb = memblock_phys_mem_size() / SZ_1M;
pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
		       (nr_pages << PAGE_SHIFT) / SZ_1M, mem_size_mb);
		return false;
}
	return true;
}
/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and the index of the
 * first region after the range is returned in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;
*start_rgn = *end_rgn = 0;
	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
rgn->base = base;
rgn->size -= base - rbase;
type->total_size -= base - rbase;
memblock_insert_region(type, idx, rbase, base - rbase,
memblock_get_region_node(rgn),
rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
rgn->base = end;
rgn->size -= end - rbase;
type->total_size -= end - rbase;
memblock_insert_region(type, idx--, rbase, end - rbase,
memblock_get_region_node(rgn),
rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
*start_rgn = idx;
*end_rgn = idx + 1;
}
}
return 0;
}
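/*
 * Example (illustrative only, not part of the original source):
 * isolating [8M, 12M) out of a single [0, 16M) region splits it into
 * [0, 8M), [8M, 12M) and [12M, 16M); the out parameters then delimit
 * the middle region.
 */
#if 0
static void __init example_isolate(void)
{
	int start_rgn, end_rgn;

	memblock_add(0, SZ_16M);
	memblock_isolate_range(&memblock.memory, SZ_8M, SZ_4M,
			       &start_rgn, &end_rgn);
	/* start_rgn == 1, end_rgn == 2 */
}
#endif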
static int __init_memblock memblock_remove_range(struct memblock_type *type,
						 phys_addr_t base,
						 phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;
for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}
int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
phys_addr_t end = base + size - 1;
/**
 * memblock_free - free boot memory allocation
 * @ptr: starting address of the boot memory allocation
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init_memblock memblock_free(void *ptr, size_t size)
{
	if (ptr)
memblock_phys_free(__pa(ptr), size);
}
/**
 * memblock_phys_free - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
{
phys_addr_t end = base + size - 1;
	if (!IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT))
		return;
	/*
	 * Initialize struct pages for free scratch memory.
	 * The struct pages for reserved scratch memory will be set up in
	 * reserve_bootmem_region()
	 */
	__for_each_mem_range(i, &memblock.memory, NULL, NUMA_NO_NODE,
			     MEMBLOCK_KHO_SCRATCH, &start, &end, &nid) {
		for (pfn = PFN_UP(start); pfn < PFN_DOWN(end); pfn++)
init_deferred_page(pfn, nid);
}
}
#endif
/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @type: memblock type to set/clear flag for
 * @base: base address of the region
 * @size: size of the region
 * @set: set or clear the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(struct memblock_type *type,
						phys_addr_t base,
						phys_addr_t size,
						int set, int flag)
{
	int i, ret, start_rgn, end_rgn;
	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++) {
		struct memblock_region *r = &type->regions[i];

		if (set)
			r->flags |= flag;
		else
			r->flags &= ~flag;
}
/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1,
				    MEMBLOCK_HOTPLUG);
}
/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0,
				    MEMBLOCK_HOTPLUG);
}
/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	if (!mirrored_kernelcore)
		return 0;
/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The memory regions marked with %MEMBLOCK_NOMAP will not be added to the
 * direct mapping of the physical memory. These regions will still be
 * covered by the memory map. The struct page representing NOMAP memory
 * frames in the memory map will be PageReserved().
 *
 * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
 * memblock, the caller must inform kmemleak to ignore that memory.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1,
				    MEMBLOCK_NOMAP);
}
/**
 * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0,
				    MEMBLOCK_NOMAP);
}
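/*
 * Example (illustrative only, not part of the original source):
 * firmware-owned ranges are commonly registered and then excluded from
 * the linear map with the helpers above. The address and size here are
 * hypothetical.
 */
#if 0
static void __init example_nomap(void)
{
	memblock_add(0x80000000ULL, SZ_2M);
	memblock_mark_nomap(0x80000000ULL, SZ_2M);
}
#endif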
/**
 * memblock_reserved_mark_noinit - Mark a reserved memory region with flag
 * MEMBLOCK_RSRV_NOINIT
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * The struct pages for the reserved regions marked %MEMBLOCK_RSRV_NOINIT will
 * not be fully initialized to allow the caller to optimize their
 * initialization.
 *
 * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled, setting this flag
 * completely bypasses the initialization of struct pages for such region.
 *
 * When %CONFIG_DEFERRED_STRUCT_PAGE_INIT is disabled, struct pages in this
 * region will be initialized with default values but won't be marked as
 * reserved.
 *
 * Return: 0 on success, -errno on failure.
 */
int __init_memblock memblock_reserved_mark_noinit(phys_addr_t base,
						  phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.reserved, base, size, 1,
MEMBLOCK_RSRV_NOINIT);
}
/**
 * memblock_mark_kho_scratch - Mark a memory region as MEMBLOCK_KHO_SCRATCH.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Only memory regions marked with %MEMBLOCK_KHO_SCRATCH will be considered
 * for allocations during early boot with kexec handover.
 *
 * Return: 0 on success, -errno on failure.
 */
__init int memblock_mark_kho_scratch(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 1,
MEMBLOCK_KHO_SCRATCH);
}
/**
 * memblock_clear_kho_scratch - Clear MEMBLOCK_KHO_SCRATCH flag for a
 * specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return: 0 on success, -errno on failure.
 */
__init int memblock_clear_kho_scratch(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(&memblock.memory, base, size, 0,
MEMBLOCK_KHO_SCRATCH);
}
static bool should_skip_region(struct memblock_type *type,
			       struct memblock_region *m,
			       int nid, int flags)
{
	int m_nid = memblock_get_region_node(m);

	/* we never skip regions when iterating memblock.reserved or physmem */
	if (type != memblock_memory)
		return false;

	/* only memory regions are associated with nodes, check it */
	if (numa_valid_node(nid) && nid != m_nid)
		return true;

	/* skip hotpluggable memory regions if needed */
	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
	    !(flags & MEMBLOCK_HOTPLUG))
		return true;

	/* if we want mirror memory skip non-mirror memory regions */
	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
		return true;

	/* skip nomap memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
		return true;

	/* skip driver-managed memory unless we were asked for it explicitly */
	if (!(flags & MEMBLOCK_DRIVER_MANAGED) && memblock_is_driver_managed(m))
		return true;

	/*
	 * In early alloc during kexec handover, we can only consider
	 * MEMBLOCK_KHO_SCRATCH regions for the allocations
	 */
	if ((flags & MEMBLOCK_KHO_SCRATCH) && !memblock_is_kho_scratch(m))
		return true;

	return false;
}
/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.  For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
		      struct memblock_type *type_a,
		      struct memblock_type *type_b, phys_addr_t *out_start,
		      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
idx_a++; else
idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
}
}
}
/* signal end of iteration */
*idx = ULLONG_MAX;
}
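/*
 * Illustrative sketch (not part of the original source): the packed
 * loop cursor used above splits a u64 into two independent 32-bit
 * indices, one per region array.
 */
#if 0
static void example_unpack_idx(u64 idx)
{
	int idx_a = idx & 0xffffffff;	/* index into type_a regions */
	int idx_b = idx >> 32;		/* index into gaps of type_b */
	u64 repacked = (u32)idx_a | (u64)idx_b << 32;

	WARN_ON(repacked != idx);
}
#endif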
/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
					  enum memblock_flags flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;
		if (should_skip_region(type_a, m, nid, flags))
			continue;
		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
}
		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;
r = &type_b->regions[idx_b];
r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : PHYS_ADDR_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
}
}
	}
	/* signal end of iteration */
*idx = ULLONG_MAX;
}
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
					  unsigned long *out_start_pfn,
					  unsigned long *out_end_pfn,
					  int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;
	int r_nid;
while (++*idx < type->cnt) {
r = &type->regions[*idx];
r_nid = memblock_get_region_node(r);
		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (!numa_valid_node(nid) || nid == r_nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
}
	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
*out_nid = r_nid;
}
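/*
 * Example (illustrative only, not part of the original source): the
 * iterator above backs for_each_mem_pfn_range(); counting all present
 * PFNs might look like this.
 */
#if 0
static unsigned long __init example_count_pfns(void)
{
	unsigned long start_pfn, end_pfn, nr = 0;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		nr += end_pfn - start_pfn;

	return nr;
}
#endif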
/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
#ifdef CONFIG_NUMA
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;
for (i = start_rgn; i < end_rgn; i++)
memblock_set_region_node(&type->regions[i], nid);
	memblock_merge_regions(type, start_rgn, end_rgn);
#endif
	return 0;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/**
 * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
 *
 * @idx: pointer to u64 loop variable
 * @zone: zone in which all of the memory blocks reside
 * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
 * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
 *
 * This function is meant to be a zone/pfn specific wrapper for the
 * for_each_mem_range type iterators. Specifically they are used in the
 * deferred memory init routines and as such we were duplicating much of
 * this logic throughout the code. So instead of having it in multiple
 * locations it seemed like it would make more sense to centralize this to
 * one new iterator that does everything they need.
 */
void __init_memblock
__next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
			     unsigned long *out_spfn, unsigned long *out_epfn)
{
	int zone_nid = zone_to_nid(zone);
phys_addr_t spa, epa;
		/*
		 * Verify the end is at least past the start of the zone and
		 * that we have at least one PFN to initialize.
		 */
		if (zone->zone_start_pfn < epfn && spfn < epfn) {
			/* if we went too far just stop searching */
			if (zone_end_pfn(zone) <= spfn) {
				*idx = U64_MAX;
				break;
}
			if (out_spfn)
				*out_spfn = max(zone->zone_start_pfn, spfn);
			if (out_epfn)
*out_epfn = min(zone_end_pfn(zone), epfn);
	/* signal end of iteration */
	if (out_spfn)
		*out_spfn = ULONG_MAX;
	if (out_epfn)
*out_epfn = 0;
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
/**
 * memblock_alloc_range_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (phys address)
 * @end: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
 *
 * If the specified node can not hold the requested memory and @exact_nid
 * is false, the allocation falls back to any node in the system.
 *
 * For systems with memory mirroring, the allocation is attempted first
 * from the regions with mirroring enabled and then retried from any
 * memory region.
 *
 * In addition, the allocated boot memory block is registered with
 * kmemleak_alloc_phys() so that it is never reported as a leak.
 *
 * Return:
 * Physical address of allocated memory block on success, %0 on failure.
 */
phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t start,
phys_addr_t end, int nid, bool exact_nid)
{
	enum memblock_flags flags = choose_memblock_flags();
phys_addr_t found;
	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of memblock_free_all)
	 */
	if (WARN_ON_ONCE(slab_is_available())) {
		void *vaddr = kzalloc_node(size, GFP_NOWAIT, nid);

return vaddr ? virt_to_phys(vaddr) : 0;
}
	if (!align) {
		/* Can't use WARNs this early in boot on powerpc */
dump_stack();
align = SMP_CACHE_BYTES;
}
again:
found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !__memblock_reserve(found, size, nid, MEMBLOCK_RSRV_KERN))
		goto done;
if (numa_valid_node(nid) && !exact_nid) {
found = memblock_find_in_range_node(size, align, start,
end, NUMA_NO_NODE,
						    flags);
		if (found && !memblock_reserve_kern(found, size))
			goto done;
}
if (flags & MEMBLOCK_MIRROR) {
flags &= ~MEMBLOCK_MIRROR;
pr_warn_ratelimited("Could not allocate %pap bytes of mirrored memory\n",
				    &size);
		goto again;
}
return 0;
done:
	/*
	 * Skip kmemleak for those places like kasan_init() and
	 * early_pgtable_alloc() due to high volume.
	 */
	if (end != MEMBLOCK_ALLOC_NOLEAKTRACE)
		/*
		 * Memblock allocated blocks are never reported as
		 * leaks. This is because many of these blocks are
		 * only referred via the physical address which is
		 * not looked up by kmemleak.
		 */
kmemleak_alloc_phys(found, size, 0);
	/*
	 * Some Virtual Machine platforms, such as Intel TDX or AMD SEV-SNP,
	 * require memory to be accepted before it can be used by the
	 * guest.
	 *
	 * Accept the memory of the allocated buffer.
	 */
accept_memory(found, size);
return found;
}
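/*
 * Illustrative sketch (not part of the original source): requesting a
 * node-local block below 4G with fallback to any node, as the function
 * above allows when @exact_nid is false. The size and alignment are
 * hypothetical.
 */
#if 0
static phys_addr_t __init example_alloc_below_4g(int nid)
{
	return memblock_alloc_range_nid(SZ_64K, SZ_4K, 0, SZ_4G,
					nid, false);
}
#endif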
/**
 * memblock_phys_alloc_range - allocate a memory block inside specified range
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @start: the lower bound of the memory region to allocate (physical address)
 * @end: the upper bound of the memory region to allocate (physical address)
 *
 * Allocate @size bytes in the range between @start and @end.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
phys_addr_t align,
phys_addr_t start,
phys_addr_t end)
{
memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
__func__, (u64)size, (u64)align, &start, &end,
		     (void *)_RET_IP_);
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					false);
}
/**
 * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Allocates memory block from the specified NUMA node. If the node
 * has no available memory, attempts to allocate from any node in the
 * system.
 *
 * Return: physical address of the allocated memory block on success,
 * %0 on failure.
 */
phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size,
					       phys_addr_t align, int nid)
{
	return memblock_alloc_range_nid(size, align, 0,
MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
}
/**
 * memblock_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @exact_nid: control the allocation fall back to other nodes
 *
 * Allocates memory block using memblock_alloc_range_nid() and
 * converts the returned physical address to virtual.
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Other constraints, such
 * as node and mirrored memory will be handled again in
 * memblock_alloc_range_nid().
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid, bool exact_nid)
{
phys_addr_t alloc;
if (max_addr > memblock.current_limit)
max_addr = memblock.current_limit;
	/* retry allocation without lower limit */
	if (!alloc && min_addr)
alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
exact_nid);
	if (!alloc)
		return NULL;
return phys_to_virt(alloc);
}
/**
 * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
 * without zeroing memory
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_exact_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
__func__, (u64)size, (u64)align, nid, &min_addr,
&max_addr, (void *)_RET_IP_);
/**
 * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
 * memory and without panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. Does not zero allocated memory, does not panic if request
 * cannot be satisfied.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid_raw(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
__func__, (u64)size, (u64)align, nid, &min_addr,
&max_addr, (void *)_RET_IP_);
/**
 * memblock_alloc_try_nid - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public function, provides additional debug information (including caller
 * info), if enabled. This function zeroes the allocated memory.
 *
 * Return:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;
/**
 * __memblock_alloc_or_panic - Try to allocate memory and panic on failure
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @func: caller func name
 *
 * This function attempts to allocate memory using memblock_alloc,
 * and in case of failure, it calls panic with the formatted message.
 * This function should not be used directly, please use the macro
 * memblock_alloc_or_panic.
 */
void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
				       const char *func)
{
	void *addr = memblock_alloc(size, align);

if (unlikely(!addr))
panic("%s: Failed to allocate %pap bytes\n", func, &size); return addr;
}
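/*
 * Example (illustrative only, not part of the original source): callers
 * normally go through the memblock_alloc_or_panic() wrapper macro
 * mentioned above so the reported name is the real caller, not this
 * helper.
 */
#if 0
static void __init example_alloc_or_panic(void)
{
	void *tbl = memblock_alloc_or_panic(SZ_4K, SZ_4K);

	/* no NULL check needed: failure has already panicked */
	memset(tbl, 0xff, SZ_4K);
}
#endif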
/**
 * memblock_free_late - free pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the memblock allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator.
 */
void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
phys_addr_t cursor, end;
end = base + size - 1;
memblock_dbg("%s: [%pa-%pa] %pS\n",
__func__, &base, &end, (void *)_RET_IP_);
kmemleak_free_part_phys(base, size);
cursor = PFN_UP(base);
end = PFN_DOWN(base + size);
		if (nid == memblock_get_region_node(r) ||
		    !numa_valid_node(nid))
			if (r->flags & MEMBLOCK_RSRV_KERN)
total += size;
}
return total;
}
/**
 * memblock_estimated_nr_free_pages - return estimated number of free pages
 * from memblock point of view
 *
 * During bootup, subsystems might need a rough estimate of the number of free
 * pages in the whole system, before precise numbers are available from the
 * buddy. Especially with CONFIG_DEFERRED_STRUCT_PAGE_INIT, the numbers
 * obtained from the buddy might be very imprecise during bootup.
 *
 * Return:
 * An estimated number of free pages from memblock point of view.
 */
unsigned long __init memblock_estimated_nr_free_pages(void)
{
	return PHYS_PFN(memblock_phys_mem_size() - memblock_reserved_size());
}
	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value PHYS_ADDR_MAX
	 */
	for_each_mem_region(r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}
	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == PHYS_ADDR_MAX)
		return;
/* truncate both memory and reserved regions */
memblock_remove_range(&memblock.memory, max_addr,
PHYS_ADDR_MAX);
memblock_remove_range(&memblock.reserved, max_addr,
PHYS_ADDR_MAX);
}
void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	if (!size)
		return;

	if (!memblock_memory->total_size) {
		pr_warn("%s: No memory registered yet\n", __func__);
		return;
}
	ret = memblock_isolate_range(&memblock.memory, base, size,
				     &start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions */
	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
memblock_remove_region(&memblock.memory, i);
	for (i = start_rgn - 1; i >= 0; i--)
		if (!memblock_is_nomap(&memblock.memory.regions[i]))
memblock_remove_region(&memblock.memory, i);
/* truncate the reserved regions */
memblock_remove_range(&memblock.reserved, 0, base);
memblock_remove_range(&memblock.reserved,
base + size, PHYS_ADDR_MAX);
}
/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) is a subset of a memory block.
 *
 * Return:
 * 0 if false, non-zero if true
 */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
phys_addr_t end = base + memblock_cap_size(base, &size);
/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base + @size) intersects a reserved
 * memory block.
 *
 * Return:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size);
}
	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = PAGE_ALIGN_DOWN(__pa(end_pg));
	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_phys_free(pg, pgend - pg);
}
/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	if (!IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) ||
	    IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
		return;
	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		start = pageblock_start_pfn(start);
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */