/* * A simple test that tries to allocate a memory region, which spans over the * min_addr and max_addr range: * * + + * | +---------------+ | * | | rgn | | * +------+---------------+-------+ * ^ ^ * | | * min_addr max_addr * * Expect to allocate a region that starts at min_addr and ends at * max_addr, given that min_addr is aligned.
*/ staticint alloc_nid_exact_address_generic_check(void)
{ struct memblock_region *rgn = &memblock.reserved.regions[0]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
phys_addr_t rgn_end;
/* * A test that tries to allocate a memory region, which can't fit into * min_addr and max_addr range, with the latter being too close to the beginning * of the available memory: * * +-------------+ * | new | * +-------------+ * + + * | + | * | | | * +-------+--------------+ * ^ ^ * | | * | max_addr * | * min_addr * * Expect no allocation to happen.
*/ staticint alloc_nid_low_max_generic_check(void)
{ void *allocated_ptr = NULL;
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region within min_addr min_addr range, * with min_addr being so close that it's next to an allocated region: * * + + * | +--------+---------------| * | | r1 | rgn | * +-------+--------+---------------+ * ^ ^ * | | * min_addr max_addr * * Expect a merge of both regions. Only the region size gets updated.
*/ staticint alloc_nid_min_reserved_generic_check(void)
{ struct memblock_region *rgn = &memblock.reserved.regions[0]; void *allocated_ptr = NULL;
phys_addr_t r1_size = SZ_128;
phys_addr_t r2_size = SZ_64;
phys_addr_t total_size = r1_size + r2_size;
phys_addr_t min_addr;
phys_addr_t max_addr;
phys_addr_t reserved_base;
/* * A test that tries to allocate a memory region within min_addr and max_addr, * with max_addr being so close that it's next to an allocated region: * * + + * | +-------------+--------| * | | rgn | r1 | * +----------+-------------+--------+ * ^ ^ * | | * min_addr max_addr * * Expect a merge of regions. Only the region size gets updated.
*/ staticint alloc_nid_max_reserved_generic_check(void)
{ struct memblock_region *rgn = &memblock.reserved.regions[0]; void *allocated_ptr = NULL;
phys_addr_t r1_size = SZ_64;
phys_addr_t r2_size = SZ_128;
phys_addr_t total_size = r1_size + r2_size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate memory within min_addr and max_add range, when * there are two reserved regions at the borders, with a gap big enough to fit * a new region: * * + + * | +--------+ +-------+------+ | * | | r2 | | rgn | r1 | | * +----+--------+---+-------+------+--+ * ^ ^ * | | * min_addr max_addr * * Expect to merge the new region with r1. The second region does not get * updated. The total size field gets updated.
*/
/* * A test that tries to allocate memory within min_addr and max_add range, when * there are two reserved regions at the borders, with a gap of a size equal to * the size of the new region: * * + + * | +--------+--------+--------+ | * | | r2 | r3 | r1 | | * +-----+--------+--------+--------+-----+ * ^ ^ * | | * min_addr max_addr * * Expect to merge all of the regions into one. The region counter and total * size fields get updated.
*/ staticint alloc_nid_reserved_full_merge_generic_check(void)
{ struct memblock_region *rgn = &memblock.reserved.regions[0]; void *allocated_ptr = NULL; struct region r1, r2;
phys_addr_t r3_size = SZ_64;
phys_addr_t total_size;
phys_addr_t max_addr;
phys_addr_t min_addr;
/* * A test that tries to allocate memory within min_addr and max_add range, when * there are two reserved regions at the borders, with a gap that can't fit * a new region: * * + + * | +----------+------+ +------+ | * | | r3 | r2 | | r1 | | * +--+----------+------+----+------+---+ * ^ ^ * | | * | max_addr * | * min_addr * * Expect to merge the new region with r2. The second region does not get * updated. The total size counter gets updated.
*/ staticint alloc_nid_top_down_reserved_no_space_check(void)
{ struct memblock_region *rgn1 = &memblock.reserved.regions[1]; struct memblock_region *rgn2 = &memblock.reserved.regions[0]; void *allocated_ptr = NULL; struct region r1, r2;
phys_addr_t r3_size = SZ_256;
phys_addr_t gap_size = SMP_CACHE_BYTES;
phys_addr_t total_size;
phys_addr_t max_addr;
phys_addr_t min_addr;
/* * A test that tries to allocate a memory region, where max_addr is * bigger than the end address of the available memory. Expect to allocate * a region that ends before the end of the memory.
*/ staticint alloc_nid_top_down_cap_max_check(void)
{ struct memblock_region *rgn = &memblock.reserved.regions[0]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region, where min_addr is * smaller than the start address of the available memory. Expect to allocate * a region that ends before the end of the memory.
*/ staticint alloc_nid_top_down_cap_min_check(void)
{ struct memblock_region *rgn = &memblock.reserved.regions[0]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region, which can't fit into min_addr * and max_addr range: * * + + * |---------+ + + | * | rgn | | | | * +---------+---------+----+------+ * ^ ^ * | | * | max_addr * | * min_add * * Expect to drop the lower limit and allocate a memory region which * starts at the beginning of the available memory.
*/ staticint alloc_nid_bottom_up_narrow_range_check(void)
{ struct memblock_region *rgn = &memblock.reserved.regions[0]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate memory within min_addr and max_add range, when * there are two reserved regions at the borders, with a gap big enough to fit * a new region: * * + + * | +--------+-------+ +------+ | * | | r2 | rgn | | r1 | | * +----+--------+-------+---+------+--+ * ^ ^ * | | * min_addr max_addr * * Expect to merge the new region with r2. The second region does not get * updated. The total size field gets updated.
*/
/* * A test that tries to allocate memory within min_addr and max_add range, when * there are two reserved regions at the borders, with a gap of a size equal to * the size of the new region: * * + + * |----------+ +------+ +----+ | * | r3 | | r2 | | r1 | | * +----------+----+------+---+----+--+ * ^ ^ * | | * | max_addr * | * min_addr * * Expect to drop the lower limit and allocate memory at the beginning of the * available memory. The region counter and total size fields get updated. * Other regions are not modified.
*/
/* * A test that tries to allocate a memory region, where max_addr is * bigger than the end address of the available memory. Expect to allocate * a region that starts at the min_addr.
*/ staticint alloc_nid_bottom_up_cap_max_check(void)
{ struct memblock_region *rgn = &memblock.reserved.regions[0]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_256;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region, where min_addr is * smaller than the start address of the available memory. Expect to allocate * a region at the beginning of the available memory.
*/ staticint alloc_nid_bottom_up_cap_min_check(void)
{ struct memblock_region *rgn = &memblock.reserved.regions[0]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_1K;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* Test case wrappers for range tests */
static int alloc_nid_simple_check(void)
{
        test_print("\tRunning %s...\n", __func__);

        /* Run the same check in both allocation directions. */
        memblock_set_bottom_up(false);
        alloc_nid_top_down_simple_check();
        memblock_set_bottom_up(true);
        alloc_nid_bottom_up_simple_check();

        return 0;
}
/* * A test that tries to allocate a memory region in a specific NUMA node that * has enough memory to allocate a region of the requested size. * Expect to allocate an aligned region at the end of the requested node.
*/ staticint alloc_nid_top_down_numa_simple_check(void)
{ int nid_req = 3; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; void *allocated_ptr = NULL;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region in a specific NUMA node that * does not have enough memory to allocate a region of the requested size: * * | +-----+ +------------------+ | * | | req | | expected | | * +---+-----+----------+------------------+-----+ * * | +---------+ | * | | rgn | | * +-----------------------------+---------+-----+ * * Expect to allocate an aligned region at the end of the last node that has * enough memory (in this case, nid = 6) after falling back to NUMA_NO_NODE.
*/ staticint alloc_nid_top_down_numa_small_node_check(void)
{ int nid_req = 1; int nid_exp = 6; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; void *allocated_ptr = NULL;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region in a specific NUMA node that * is fully reserved: * * | +---------+ +------------------+ | * | |requested| | expected | | * +--------------+---------+------------+------------------+-----+ * * | +---------+ +---------+ | * | | reserved| | new | | * +--------------+---------+---------------------+---------+-----+ * * Expect to allocate an aligned region at the end of the last node that is * large enough and has enough unreserved memory (in this case, nid = 6) after * falling back to NUMA_NO_NODE. The region count and total size get updated.
*/ staticint alloc_nid_top_down_numa_node_reserved_check(void)
{ int nid_req = 2; int nid_exp = 6; struct memblock_region *new_rgn = &memblock.reserved.regions[1]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; void *allocated_ptr = NULL;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region in a specific NUMA node that * is partially reserved but has enough memory for the allocated region: * * | +---------------------------------------+ | * | | requested | | * +-----------+---------------------------------------+----------+ * * | +------------------+ +-----+ | * | | reserved | | new | | * +-----------+------------------+--------------+-----+----------+ * * Expect to allocate an aligned region at the end of the requested node. The * region count and total size get updated.
*/ staticint alloc_nid_top_down_numa_part_reserved_check(void)
{ int nid_req = 4; struct memblock_region *new_rgn = &memblock.reserved.regions[1]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; void *allocated_ptr = NULL; struct region r1;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region in a specific NUMA node that * is partially reserved and does not have enough contiguous memory for the * allocated region: * * | +-----------------------+ +----------------------| * | | requested | | expected | * +-----------+-----------------------+---------+----------------------+ * * | +----------+ +-----------| * | | reserved | | new | * +-----------------+----------+---------------------------+-----------+ * * Expect to allocate an aligned region at the end of the last node that is * large enough and has enough unreserved memory (in this case, * nid = NUMA_NODES - 1) after falling back to NUMA_NO_NODE. The region count * and total size get updated.
*/ staticint alloc_nid_top_down_numa_part_reserved_fallback_check(void)
{ int nid_req = 4; int nid_exp = NUMA_NODES - 1; struct memblock_region *new_rgn = &memblock.reserved.regions[1]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; void *allocated_ptr = NULL; struct region r1;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region that spans over the min_addr * and max_addr range and overlaps with two different nodes, where the first * node is the requested node: * * min_addr * | max_addr * | | * v v * | +-----------------------+-----------+ | * | | requested | node3 | | * +-----------+-----------------------+-----------+--------------+ * + + * | +-----------+ | * | | rgn | | * +-----------------------+-----------+--------------------------+ * * Expect to drop the lower limit and allocate a memory region that ends at * the end of the requested node.
*/ staticint alloc_nid_top_down_numa_split_range_low_check(void)
{ int nid_req = 2; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_512;
phys_addr_t min_addr;
phys_addr_t max_addr;
phys_addr_t req_node_end;
/* * A test that tries to allocate a memory region that spans over the min_addr * and max_addr range and overlaps with two different nodes, where the second * node is the requested node: * * min_addr * | max_addr * | | * v v * | +--------------------------+---------+ | * | | expected |requested| | * +------+--------------------------+---------+----------------+ * + + * | +---------+ | * | | rgn | | * +-----------------------+---------+--------------------------+ * * Expect to drop the lower limit and allocate a memory region that * ends at the end of the first node that overlaps with the range.
*/ staticint alloc_nid_top_down_numa_split_range_high_check(void)
{ int nid_req = 3; int nid_exp = nid_req - 1; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_512;
phys_addr_t min_addr;
phys_addr_t max_addr;
phys_addr_t exp_node_end;
/* * A test that tries to allocate a memory region that spans over the min_addr * and max_addr range and overlaps with two different nodes, where the requested * node ends before min_addr: * * min_addr * | max_addr * | | * v v * | +---------------+ +-------------+---------+ | * | | requested | | node1 | node2 | | * +----+---------------+--------+-------------+---------+----------+ * + + * | +---------+ | * | | rgn | | * +----------+---------+-------------------------------------------+ * * Expect to drop the lower limit and allocate a memory region that ends at * the end of the requested node.
*/ staticint alloc_nid_top_down_numa_no_overlap_split_check(void)
{ int nid_req = 2; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; struct memblock_region *node2 = &memblock.memory.regions[6]; void *allocated_ptr = NULL;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate memory within min_addr and max_add range when * the requested node and the range do not overlap, and requested node ends * before min_addr. The range overlaps with multiple nodes along node * boundaries: * * min_addr * | max_addr * | | * v v * |-----------+ +----------+----...----+----------+ | * | requested | | min node | ... | max node | | * +-----------+-----------+----------+----...----+----------+------+ * + + * | +-----+ | * | | rgn | | * +---------------------------------------------------+-----+------+ * * Expect to allocate a memory region at the end of the final node in * the range after falling back to NUMA_NO_NODE.
*/ staticint alloc_nid_top_down_numa_no_overlap_low_check(void)
{ int nid_req = 0; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *min_node = &memblock.memory.regions[2]; struct memblock_region *max_node = &memblock.memory.regions[5]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_64;
phys_addr_t max_addr;
phys_addr_t min_addr;
/* * A test that tries to allocate memory within min_addr and max_add range when * the requested node and the range do not overlap, and requested node starts * after max_addr. The range overlaps with multiple nodes along node * boundaries: * * min_addr * | max_addr * | | * v v * | +----------+----...----+----------+ +-----------+ | * | | min node | ... | max node | | requested | | * +-----+----------+----...----+----------+--------+-----------+---+ * + + * | +-----+ | * | | rgn | | * +---------------------------------+-----+------------------------+ * * Expect to allocate a memory region at the end of the final node in * the range after falling back to NUMA_NO_NODE.
*/ staticint alloc_nid_top_down_numa_no_overlap_high_check(void)
{ int nid_req = 7; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *min_node = &memblock.memory.regions[2]; struct memblock_region *max_node = &memblock.memory.regions[5]; void *allocated_ptr = NULL;
phys_addr_t size = SZ_64;
phys_addr_t max_addr;
phys_addr_t min_addr;
/* * A test that tries to allocate a memory region in a specific NUMA node that * has enough memory to allocate a region of the requested size. * Expect to allocate an aligned region at the beginning of the requested node.
*/ staticint alloc_nid_bottom_up_numa_simple_check(void)
{ int nid_req = 3; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; void *allocated_ptr = NULL;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region in a specific NUMA node that * does not have enough memory to allocate a region of the requested size: * * |----------------------+-----+ | * | expected | req | | * +----------------------+-----+----------------+ * * |---------+ | * | rgn | | * +---------+-----------------------------------+ * * Expect to allocate an aligned region at the beginning of the first node that * has enough memory (in this case, nid = 0) after falling back to NUMA_NO_NODE.
*/ staticint alloc_nid_bottom_up_numa_small_node_check(void)
{ int nid_req = 1; int nid_exp = 0; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; void *allocated_ptr = NULL;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region in a specific NUMA node that * is fully reserved: * * |----------------------+ +-----------+ | * | expected | | requested | | * +----------------------+-----+-----------+--------------------+ * * |-----------+ +-----------+ | * | new | | reserved | | * +-----------+----------------+-----------+--------------------+ * * Expect to allocate an aligned region at the beginning of the first node that * is large enough and has enough unreserved memory (in this case, nid = 0) * after falling back to NUMA_NO_NODE. The region count and total size get * updated.
*/ staticint alloc_nid_bottom_up_numa_node_reserved_check(void)
{ int nid_req = 2; int nid_exp = 0; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; void *allocated_ptr = NULL;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/* * A test that tries to allocate a memory region in a specific NUMA node that * is partially reserved but has enough memory for the allocated region: * * | +---------------------------------------+ | * | | requested | | * +-----------+---------------------------------------+---------+ * * | +------------------+-----+ | * | | reserved | new | | * +-----------+------------------+-----+------------------------+ * * Expect to allocate an aligned region in the requested node that merges with * the existing reserved region. The total size gets updated.
*/ staticint alloc_nid_bottom_up_numa_part_reserved_check(void)
{ int nid_req = 4; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; void *allocated_ptr = NULL; struct region r1;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
phys_addr_t total_size;
/* * A test that tries to allocate a memory region in a specific NUMA node that * is partially reserved and does not have enough contiguous memory for the * allocated region: * * |----------------------+ +-----------------------+ | * | expected | | requested | | * +----------------------+-------+-----------------------+---------+ * * |-----------+ +----------+ | * | new | | reserved | | * +-----------+------------------------+----------+----------------+ * * Expect to allocate an aligned region at the beginning of the first * node that is large enough and has enough unreserved memory (in this case, * nid = 0) after falling back to NUMA_NO_NODE. The region count and total size * get updated.
*/ staticint alloc_nid_bottom_up_numa_part_reserved_fallback_check(void)
{ int nid_req = 4; int nid_exp = 0; struct memblock_region *new_rgn = &memblock.reserved.regions[0]; struct memblock_region *req_node = &memblock.memory.regions[nid_req]; struct memblock_region *exp_node = &memblock.memory.regions[nid_exp]; void *allocated_ptr = NULL; struct region r1;
phys_addr_t size;
phys_addr_t min_addr;
phys_addr_t max_addr;
/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the first
 * node is the requested node.
 *
 * Expect to drop the lower limit and allocate a memory region at the
 * beginning of the requested node.
 */
static int alloc_nid_bottom_up_numa_split_range_low_check(void)
{
        int nid_req = 2;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_512;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t req_node_end;

        /* NOTE(review): original test body lost in this copy — placeholder
         * only; restore the setup/alloc/assert logic from upstream. */
        return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the second
 * node is the requested node.
 *
 * Expect to drop the lower limit and allocate a memory region at the
 * beginning of the first node that has enough memory.
 */
static int alloc_nid_bottom_up_numa_split_range_high_check(void)
{
        int nid_req = 3;
        int nid_exp = 0;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *exp_node = &memblock.memory.regions[nid_exp];
        void *allocated_ptr = NULL;
        phys_addr_t size = SZ_512;
        phys_addr_t min_addr;
        phys_addr_t max_addr;
        phys_addr_t exp_node_end;

        /* NOTE(review): original test body lost in this copy — placeholder
         * only; restore the setup/alloc/assert logic from upstream. */
        return 0;
}

/*
 * A test that tries to allocate a memory region that spans over the min_addr
 * and max_addr range and overlaps with two different nodes, where the
 * requested node ends before min_addr.
 *
 * Expect to drop the lower limit and allocate a memory region that starts
 * at the beginning of the requested node.
 */
static int alloc_nid_bottom_up_numa_no_overlap_split_check(void)
{
        int nid_req = 2;
        struct memblock_region *new_rgn = &memblock.reserved.regions[0];
        struct memblock_region *req_node = &memblock.memory.regions[nid_req];
        struct memblock_region *node2 = &memblock.memory.regions[6];
        void *allocated_ptr = NULL;
        phys_addr_t size;
        phys_addr_t min_addr;
        phys_addr_t max_addr;

        /* NOTE(review): original test body lost in this copy — placeholder
         * only; restore the setup/alloc/assert logic from upstream. */
        return 0;
}
/*
 * NOTE(review): the following text is unrelated website boilerplate (a
 * German content disclaimer) that was appended to this file during
 * extraction.  It is not part of the source and should be removed;
 * preserved here, commented out, for reference:
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *  sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 *  noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 *  zugesichert.  Bemerkung: Die farbliche Syntaxdarstellung und die
 *  Messung sind noch experimentell."
 */