// SPDX-License-Identifier: GPL-2.0-only
/*
 * NUMA support, based on the x86 implementation.
 *
 * Copyright (C) 2015 Cavium Inc.
 * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
 */
/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate and clear the mapping */
	for (node = 0; node < nr_node_ids; node++) {
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);
		cpumask_clear(node_to_cpumask_map[node]);
	}

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids);
}
/* * Set the cpu to node and mem mapping
*/ void numa_store_cpu_info(unsignedint cpu)
{
set_cpu_numa_node(cpu, cpu_to_node_map[cpu]);
}
/*
 * Record the node for a CPU before the full NUMA topology is set up.
 * Invalid nids (or numa_off) fall back to node 0.
 */
void __init early_map_cpu_to_node(unsigned int cpu, int nid)
{
	/* fallback to node 0 */
	if (nid < 0 || nid >= MAX_NUMNODES || numa_off)
		nid = 0;

	cpu_to_node_map[cpu] = nid;

	/*
	 * We should set the numa node of cpu0 as soon as possible, because it
	 * has already been set up online before. cpu_to_node(0) will soon be
	 * called.
	 */
	if (!cpu)
		set_cpu_numa_node(cpu, nid);
}
/**
 * dummy_numa_init() - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node (node 0) and add memory blocks that cover all
 * allowed memory. It is unlikely that this function fails.
 *
 * Return: 0 on success, -errno on failure.
 */
static int __init dummy_numa_init(void)
{
	phys_addr_t start = memblock_start_of_DRAM();
	phys_addr_t end = memblock_end_of_DRAM() - 1;
	int ret;

	if (numa_off)
		pr_info("NUMA disabled\n"); /* Forced off on command line. */
	pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end);

	ret = numa_add_memblk(0, start, end + 1);
	if (ret) {
		pr_err("NUMA init failed\n");
		return ret;
	}
	node_set(0, numa_nodes_parsed);

	numa_off = true;
	return 0;
}
#ifdef CONFIG_ACPI_NUMA staticint __init arch_acpi_numa_init(void)
{ int ret;
ret = acpi_numa_init(); if (ret) {
pr_debug("Failed to initialise from firmware\n"); return ret;
}
/**
 * arch_numa_init() - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds. The
 * last fallback is dummy single node config encompassing whole memory.
 */
void __init arch_numa_init(void)
{
	if (!numa_off) {
		if (!acpi_disabled && !numa_init(arch_acpi_numa_init))
			return;
		if (acpi_disabled && !numa_init(of_numa_init))
			return;
	}

	numa_init(dummy_numa_init);
}
#ifdef CONFIG_NUMA_EMU
/*
 * Remap the early CPU-to-node table onto emulated node ids once NUMA
 * emulation has partitioned the physical nodes.
 */
void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys,
					unsigned int nr_emu_nids)
{
	int i, j;

	/*
	 * Transform cpu_to_node_map table to use emulated nids by
	 * reverse-mapping phys_nid. The maps should always exist but fall
	 * back to zero just in case.
	 */
	for (i = 0; i < ARRAY_SIZE(cpu_to_node_map); i++) {
		if (cpu_to_node_map[i] == NUMA_NO_NODE)
			continue;
		for (j = 0; j < nr_emu_nids; j++)
			if (cpu_to_node_map[i] == emu_nid_to_phys[j])
				break;
		cpu_to_node_map[i] = j < nr_emu_nids ? j : 0;
	}
}
#endif /* CONFIG_NUMA_EMU */
/*
 * NOTE(review): unrelated website-disclaimer boilerplate was appended to this
 * file during extraction; commented out so the file remains valid C.
 * Original text (translated from German): "The information on this website
 * has been compiled carefully to the best of our knowledge. However, neither
 * completeness, correctness, nor quality of the provided information is
 * guaranteed. Note: the colored syntax rendering and the measurement are
 * still experimental."
 */