struct memory_tier {
	/* hierarchy of memory tiers */
	struct list_head list;
	/* list of all memory types part of this tier */
	struct list_head memory_types;
	/*
	 * start value of abstract distance. memory tier maps
	 * an abstract distance range,
	 * adistance_start .. adistance_start + MEMTIER_CHUNK_SIZE
	 */
	int adistance_start;
	struct device dev;
	/* All the nodes that are part of all the lower memory tiers. */
	nodemask_t lower_tier_mask;
};
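/*
 * Illustrative sketch (not part of the original file): a tier owns the
 * abstract distance range adistance_start .. adistance_start +
 * MEMTIER_CHUNK_SIZE - 1, so the start value of the owning tier can be
 * recovered by rounding an abstract distance down to a chunk boundary,
 * mirroring the round_down() used when looking up a memtype's tier below.
 * MEMTIER_CHUNK_SIZE is assumed to come from <linux/memory-tiers.h>.
 */
static inline int example_adistance_to_tier_start(int adistance)
{
	return round_down(adistance, MEMTIER_CHUNK_SIZE);
}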
struct demotion_nodes {
	nodemask_t preferred;
};
struct node_memory_type_map {
	struct memory_dev_type *memtype;
	int map_count;
};
static DEFINE_MUTEX(memory_tier_lock);
static LIST_HEAD(memory_tiers);
/*
 * The list is used to store all memory types that are not created
 * by a device driver.
 */
static LIST_HEAD(default_memory_types);
static struct node_memory_type_map node_memory_types[MAX_NUMNODES];
struct memory_dev_type *default_dram_type;
nodemask_t default_dram_nodes __initdata = NODE_MASK_NONE;
#ifdef CONFIG_NUMA_BALANCING
/**
 * folio_use_access_time - check if a folio reuses cpupid for page access time
 * @folio: folio to check
 *
 * folio's _last_cpupid field is repurposed by memory tiering. In memory
 * tiering mode, cpupid of slow memory folio (not toptier memory) is used to
 * record page access time.
 *
 * Return: true if the folio's _last_cpupid field is used to record page
 * access time.
 */
bool folio_use_access_time(struct folio *folio)
{
	return (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) &&
	       !node_is_toptier(folio_nid(folio));
}
#endif
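/*
 * Hypothetical usage sketch (not from the original file, assumes
 * CONFIG_NUMA_BALANCING): any consumer of the _last_cpupid field has to
 * branch on folio_use_access_time() first, because the same bits hold an
 * access time in tiering mode and a cpupid otherwise. folio_last_cpupid()
 * is assumed here to be the accessor for that field.
 */
static void example_dump_last_cpupid(struct folio *folio)
{
	int val = folio_last_cpupid(folio);

	if (folio_use_access_time(folio))
		pr_debug("folio on node %d records access time %d\n",
			 folio_nid(folio), val);
	else
		pr_debug("folio on node %d records cpupid %d\n",
			 folio_nid(folio), val);
}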
/* The lock is used to protect `default_dram_perf*` info and nid. */
static DEFINE_MUTEX(default_dram_perf_lock);
static bool default_dram_perf_error;
static struct access_coordinate default_dram_perf;
static int default_dram_perf_ref_nid = NUMA_NO_NODE;
static const char *default_dram_perf_ref_source;
static void memory_tier_device_release(struct device *dev)
{
	struct memory_tier *tier = to_memory_tier(dev);

	/*
	 * synchronize_rcu in clear_node_memory_tier makes sure
	 * we don't have rcu access to this memory tier.
	 */
	kfree(tier);
}
	adistance = round_down(adistance, memtier_adistance_chunk_size);
	/*
	 * If the memtype is already part of a memory tier,
	 * just return that.
	 */
	if (!list_empty(&memtype->tier_sibling)) {
		list_for_each_entry(memtier, &memory_tiers, list) {
			if (adistance == memtier->adistance_start)
				return memtier;
		}
		WARN_ON(1);
		return ERR_PTR(-EINVAL);
	}
	pgdat = NODE_DATA(node);
	if (!pgdat)
		return NULL;
	/*
	 * Since we hold memory_tier_lock, we can avoid
	 * RCU read locks when accessing the details. No
	 * parallel updates are possible here.
	 */
	return rcu_dereference_check(pgdat->memtier,
				     lockdep_is_held(&memory_tier_lock));
}
	/*
	 * pg_data_t.memtier updates include a synchronize_rcu(),
	 * which ensures that we either find NULL or a valid memtier
	 * in NODE_DATA. Protect the access via rcu_read_lock().
	 */
	rcu_read_lock();
	memtier = rcu_dereference(pgdat->memtier);
	if (memtier)
		*targets = memtier->lower_tier_mask;
	else
		*targets = NODE_MASK_NONE;
	rcu_read_unlock();
}
/**
 * next_demotion_node() - Get the next node in the demotion path
 * @node: The starting node to lookup the next node
 *
 * Return: node id for next memory node in the demotion path hierarchy
 * from @node; NUMA_NO_NODE if @node is terminal. This does not keep the
 * returned node online or guarantee that it *continues* to be the next
 * demotion target.
 */
int next_demotion_node(int node)
{
	struct demotion_nodes *nd;
	int target;

	if (!node_demotion)
		return NUMA_NO_NODE;

	nd = &node_demotion[node];

	/*
	 * node_demotion[] is updated without excluding this
	 * function from running.
	 *
	 * Make sure to use RCU over entire code blocks if
	 * node_demotion[] reads need to be consistent.
	 */
	rcu_read_lock();
	/*
	 * If there are multiple target nodes, just select one
	 * target node randomly.
	 *
	 * Round-robin selection would need an extra field in
	 * node_demotion[] to record the last selected target node,
	 * and the constant updates to it could cause cache
	 * ping-pong. Per-CPU state would avoid that, but is more
	 * complicated. So random selection is the simplest choice
	 * for now.
	 */
	target = node_random(&nd->preferred);
	rcu_read_unlock();

	return target;
}
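/*
 * Usage sketch (illustration only, not part of the original file): walking
 * the full demotion path from a node. Demotion only ever moves to lower
 * tiers and next_demotion_node() returns NUMA_NO_NODE at the terminal
 * tier, so the loop below always terminates.
 */
static void example_walk_demotion_path(int node)
{
	while (node != NUMA_NO_NODE) {
		pr_info("demotion path: node %d\n", node);
		node = next_demotion_node(node);
	}
}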
static void disable_all_demotion_targets(void)
{
	struct memory_tier *memtier;
	int node;

	for_each_node_state(node, N_MEMORY) {
		node_demotion[node].preferred = NODE_MASK_NONE;
		/*
		 * We are holding memory_tier_lock, it is safe
		 * to access pgdat->memtier.
		 */
		memtier = __node_get_memory_tier(node);
		if (memtier)
			memtier->lower_tier_mask = NODE_MASK_NONE;
	}
	/*
	 * Ensure that the "disable" is visible across the system.
	 * Readers will see either a combination of before+disable
	 * state or disable+after. They will never see before and
	 * after state together.
	 */
	synchronize_rcu();
}
static void dump_demotion_targets(void)
{
	int node;

	for_each_node_state(node, N_MEMORY) {
		struct memory_tier *memtier = __node_get_memory_tier(node);
		nodemask_t preferred = node_demotion[node].preferred;

		if (!memtier)
			continue;

		if (nodes_empty(preferred))
			pr_info("Demotion targets for Node %d: null\n", node);
		else
			pr_info("Demotion targets for Node %d: preferred: %*pbl, fallback: %*pbl\n",
				node, nodemask_pr_args(&preferred),
				nodemask_pr_args(&memtier->lower_tier_mask));
	}
}
/*
 * Find an automatic demotion target for all memory
 * nodes. Failing here is OK. It might just indicate
 * being at the end of a chain.
 */
static void establish_demotion_targets(void)
{
	struct memory_tier *memtier;
	struct demotion_nodes *nd;
	int target = NUMA_NO_NODE, node;
	int distance, best_distance;
	nodemask_t tier_nodes, lower_tier;

	for_each_node_state(node, N_MEMORY) {
		best_distance = -1;
		nd = &node_demotion[node];

		memtier = __node_get_memory_tier(node);
		if (!memtier || list_is_last(&memtier->list, &memory_tiers))
			continue;
		/*
		 * Get the lower memtier to find the demotion node list.
		 */
		memtier = list_next_entry(memtier, list);
		tier_nodes = get_memtier_nodemask(memtier);
		/*
		 * find_next_best_node() uses the 'used' nodemask as a skip
		 * list. Add all memory nodes except the selected memory
		 * tier's nodelist to the skip list, so that we find the
		 * best node within the memtier nodelist.
		 */
		nodes_andnot(tier_nodes, node_states[N_MEMORY], tier_nodes);

		/*
		 * Find all the nodes in the memory tier node list with the
		 * same best distance and add them to the preferred mask.
		 * We randomly select between the nodes in the preferred
		 * mask when allocating pages during demotion.
		 */
		do {
			target = find_next_best_node(node, &tier_nodes);
			if (target == NUMA_NO_NODE)
				break;

			distance = node_distance(node, target);
			if (distance == best_distance || best_distance == -1) {
				best_distance = distance;
				node_set(target, nd->preferred);
			} else {
				break;
			}
		} while (1);
	}
	/*
	 * Promotion is allowed from a memory tier to a higher
	 * memory tier only if the lower tier doesn't include
	 * compute. We want to skip promotion from a memory tier
	 * if any node that is part of that tier has CPUs.
	 * Once we detect such a memory tier, we consider it the
	 * top tier, from which promotion is not allowed.
	 */
	list_for_each_entry_reverse(memtier, &memory_tiers, list) {
		tier_nodes = get_memtier_nodemask(memtier);
		nodes_and(tier_nodes, node_states[N_CPU], tier_nodes);
		if (!nodes_empty(tier_nodes)) {
			/*
			 * abstract distance below the max value of this
			 * memtier is considered toptier.
			 */
			top_tier_adistance = memtier->adistance_start +
					     MEMTIER_CHUNK_SIZE - 1;
			break;
		}
	}
	/*
	 * Now build the lower_tier mask for each tier, collecting the node
	 * masks of all memory tiers below it. This allows us to fall back
	 * demotion page allocation to a set of nodes that is closer to the
	 * preferred node selected above.
	 */
	lower_tier = node_states[N_MEMORY];
	list_for_each_entry(memtier, &memory_tiers, list) {
		/*
		 * Keep removing the current tier's nodes from lower_tier;
		 * this removes all nodes in the current and higher memory
		 * tiers from the lower_tier mask.
		 */
		tier_nodes = get_memtier_nodemask(memtier);
		nodes_andnot(lower_tier, lower_tier, tier_nodes);
		memtier->lower_tier_mask = lower_tier;
	}
}
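/*
 * Worked example for the lower_tier pass above (assumed topology, not from
 * the source): three tiers T0 = {0,1} (top), T1 = {2}, T2 = {3}, with
 * N_MEMORY = {0,1,2,3}. Walking the tier list top-down, lower_tier shrinks
 * as each tier's nodes are removed:
 *   after T0: lower_tier = {2,3} -> T0->lower_tier_mask = {2,3}
 *   after T1: lower_tier = {3}   -> T1->lower_tier_mask = {3}
 *   after T2: lower_tier = {}    -> T2->lower_tier_mask = {}
 * Each tier's mask thus holds exactly the nodes of all tiers below it.
 */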
static inline void __init_node_memory_type(int node, struct memory_dev_type *memtype)
{
	if (!node_memory_types[node].memtype)
		node_memory_types[node].memtype = memtype;
	/*
	 * For each device getting added in the same NUMA node
	 * with this specific memtype, bump the map count. We
	 * only take the memtype device reference once, so that
	 * changing a node's memtype can be done by dropping the
	 * only reference count taken here.
	 */
	if (node_memory_types[node].memtype == memtype) {
		if (!node_memory_types[node].map_count++)
			kref_get(&memtype->kref);
	}
}
	if (!node_state(node, N_MEMORY))
		return ERR_PTR(-EINVAL);

	mt_calc_adistance(node, &adist);
	if (!node_memory_types[node].memtype) {
		memtype = mt_find_alloc_memory_type(adist, &default_memory_types);
		if (IS_ERR(memtype)) {
			memtype = default_dram_type;
			pr_info("Failed to allocate a memory type. Fall back.\n");
		}
	}
	/*
	 * Make sure that anybody looking at NODE_DATA who finds
	 * a valid memtier finds memory_dev_types with nodes still
	 * linked to the memtier. We achieve this by waiting for
	 * the RCU read section to finish using synchronize_rcu.
	 * This also enables us to free the destroyed memory tier
	 * with kfree instead of kfree_rcu.
	 */
	memtier = __node_get_memory_tier(node);
	if (memtier) {
		struct memory_dev_type *memtype;
/*
 * This is invoked via `late_initcall()` to initialize memory tiers for
 * memory nodes, both with and without CPUs. After the initialization of
 * firmware and devices, adistance algorithms are expected to be provided.
 */
static int __init memory_tier_late_init(void)
{
	int nid;
	struct memory_tier *memtier;

	/* Assign each uninitialized N_MEMORY node to a memory tier. */
	for_each_node_state(nid, N_MEMORY) {
		/*
		 * Some device drivers may have initialized
		 * memory tiers, potentially bringing memory nodes
		 * online and configuring memory tiers.
		 * Exclude them here.
		 */
		if (node_memory_types[nid].memtype)
			continue;

		memtier = set_node_memory_tier(nid);
		if (IS_ERR(memtier))
			continue;
	}
	/*
	 * The performance of all default DRAM nodes is expected to be
	 * the same (that is, the variation is less than 10%). And it
	 * will be used as the base to calculate the abstract distance
	 * of other memory nodes.
	 */
	if (abs(perf->read_latency - default_dram_perf.read_latency) * 10 >
	    default_dram_perf.read_latency ||
	    abs(perf->write_latency - default_dram_perf.write_latency) * 10 >
	    default_dram_perf.write_latency ||
	    abs(perf->read_bandwidth - default_dram_perf.read_bandwidth) * 10 >
	    default_dram_perf.read_bandwidth ||
	    abs(perf->write_bandwidth - default_dram_perf.write_bandwidth) * 10 >
	    default_dram_perf.write_bandwidth) {
		pr_info("memory-tiers: the performance of DRAM node %d mismatches that of the reference\n"
			"DRAM node %d.\n", nid, default_dram_perf_ref_nid);
		pr_info("  performance of reference DRAM node %d from %s:\n",
			default_dram_perf_ref_nid, default_dram_perf_ref_source);
		dump_hmem_attrs(&default_dram_perf, "    ");
		pr_info("  performance of DRAM node %d from %s:\n", nid, source);
		dump_hmem_attrs(perf, "    ");
		pr_info("  disable default DRAM node performance based abstract distance algorithm.\n");
		default_dram_perf_error = true;
		return -EINVAL;
	}

	return 0;
}
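/*
 * Worked example for the 10% check above (illustrative numbers): with a
 * reference read_latency of 100, a node reporting 115 gives
 * abs(115 - 100) * 10 = 150 > 100, i.e. a mismatch, while a node at 109
 * gives abs(109 - 100) * 10 = 90 <= 100 and is accepted as within
 * tolerance. The same 10% bound applies to each of the four metrics.
 */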
int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
{
	guard(mutex)(&default_dram_perf_lock);

	if (default_dram_perf_error)
		return -EIO;

	if (default_dram_perf_ref_nid == NUMA_NO_NODE)
		return -ENOENT;

	/*
	 * The abstract distance of a memory node is in direct proportion to
	 * its memory latency (read + write) and inversely proportional to its
	 * memory bandwidth (read + write). The abstract distance, memory
	 * latency, and memory bandwidth of the default DRAM nodes are used as
	 * the base.
	 */
	*adist = MEMTIER_ADISTANCE_DRAM *
		(perf->read_latency + perf->write_latency) /
		(default_dram_perf.read_latency + default_dram_perf.write_latency) *
		(default_dram_perf.read_bandwidth + default_dram_perf.write_bandwidth) /
		(perf->read_bandwidth + perf->write_bandwidth);

	return 0;
}
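/*
 * Worked example for the formula in mt_perf_to_adistance() (illustrative
 * numbers, not from the source): a node with twice the DRAM latency sum
 * and half the DRAM bandwidth sum gets
 *   *adist = MEMTIER_ADISTANCE_DRAM * 2 * 2 = 4 * MEMTIER_ADISTANCE_DRAM,
 * placing it in a markedly slower tier than default DRAM, while a node
 * matching DRAM on both sums gets exactly MEMTIER_ADISTANCE_DRAM.
 */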
/**
 * register_mt_adistance_algorithm() - Register memory tiering abstract distance algorithm
 * @nb: The notifier block which describes the algorithm
 *
 * Return: 0 on success, errno on error.
 *
 * Every memory tiering abstract distance algorithm provider needs to
 * register the algorithm with register_mt_adistance_algorithm(). To
 * calculate the abstract distance for a specified memory node, the
 * notifier function will be called unless some higher-priority
 * algorithm has already provided a result. The prototype of the notifier
 * function is as follows,
 *
 * int (*algorithm_notifier)(struct notifier_block *nb,
 *			     unsigned long nid, void *data);
 *
 * Where "nid" specifies the memory node, "data" is the pointer to the
 * returned abstract distance (that is, "int *adist"). If the
 * algorithm provides the result, NOTIFY_STOP should be returned.
 * Otherwise, return_value & %NOTIFY_STOP_MASK == 0 to allow the next
 * algorithm in the chain to provide the result.
 */
int register_mt_adistance_algorithm(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&mt_adistance_algorithms, nb);
}
EXPORT_SYMBOL_GPL(register_mt_adistance_algorithm);
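/*
 * Hypothetical provider sketch (the "example_*" names are assumptions, not
 * part of the file): an algorithm following the prototype documented above.
 * It claims one node and lets the chain continue for all others.
 */
static int example_nid = 1;			/* assumed node of interest */

static int example_adist_notifier(struct notifier_block *nb,
				  unsigned long nid, void *data)
{
	int *adist = data;

	if (nid != example_nid)
		return NOTIFY_OK;		/* let the next algorithm try */

	*adist = MEMTIER_ADISTANCE_DRAM * 2;	/* assumed: twice as "far" as DRAM */
	return NOTIFY_STOP;			/* result provided, stop the chain */
}

static struct notifier_block example_adist_nb = {
	.notifier_call = example_adist_notifier,
};

/*
 * A driver would register this from its init path:
 *	register_mt_adistance_algorithm(&example_adist_nb);
 * and drop it on exit with unregister_mt_adistance_algorithm().
 */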
/**
 * unregister_mt_adistance_algorithm() - Unregister memory tiering abstract distance algorithm
 * @nb: The notifier block which describes the algorithm
 *
 * Return: 0 on success, errno on error.
 */
int unregister_mt_adistance_algorithm(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&mt_adistance_algorithms, nb);
}
EXPORT_SYMBOL_GPL(unregister_mt_adistance_algorithm);
/**
 * mt_calc_adistance() - Calculate abstract distance with registered algorithms
 * @node: the node to calculate abstract distance for
 * @adist: the returned abstract distance
 *
 * Return: if return_value & %NOTIFY_STOP_MASK != 0, then some
 * abstract distance algorithm has provided the result, which is returned
 * via @adist. Otherwise, no algorithm could provide the result and @adist
 * is left as it is.
 */
int mt_calc_adistance(int node, int *adist)
{
	return blocking_notifier_call_chain(&mt_adistance_algorithms, node, adist);
}
EXPORT_SYMBOL_GPL(mt_calc_adistance);
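/*
 * Caller sketch (illustration only, not part of the original file): when no
 * registered algorithm claims the node, the return value has no
 * NOTIFY_STOP_MASK bit set and @adist keeps the value it was seeded with,
 * so the caller can pre-load a default.
 */
static int example_node_adistance(int nid)
{
	int adist = MEMTIER_ADISTANCE_DRAM;	/* default if nobody answers */

	if (!(mt_calc_adistance(nid, &adist) & NOTIFY_STOP_MASK))
		pr_debug("node %d: no adistance algorithm, using DRAM default\n",
			 nid);
	return adist;
}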
	mutex_lock(&memory_tier_lock);
	/*
	 * For now we can have 4 faster memory tiers with smaller adistance
	 * than the default DRAM tier.
	 */
	default_dram_type = mt_find_alloc_memory_type(MEMTIER_ADISTANCE_DRAM,
						      &default_memory_types);
	mutex_unlock(&memory_tier_lock);
	if (IS_ERR(default_dram_type))
		panic("%s() failed to allocate default DRAM tier\n", __func__);

	/* Record nodes with memory and CPU to set default DRAM performance. */
	nodes_and(default_dram_nodes, node_states[N_MEMORY],
		  node_states[N_CPU]);