// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */
/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};
/** * hmat_get_extended_linear_cache_size - Retrieve the extended linear cache size * @backing_res: resource from the backing media * @nid: node id for the memory region * @cache_size: (Output) size of extended linear cache. * * Return: 0 on success. Errno on failure. *
*/ int hmat_get_extended_linear_cache_size(struct resource *backing_res, int nid,
resource_size_t *cache_size)
{ unsignedint pxm = node_to_pxm(nid); struct memory_target *target; struct target_cache *tcache; struct resource *res;
target = find_mem_target(pxm); if (!target) return -ENOENT;
list_for_each_entry(tcache, &target->caches, node) { if (tcache->cache_attrs.address_mode !=
NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR) continue;
res = &target->memregions; if (!resource_contains(res, backing_res)) continue;
/** * acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port * @uid: ACPI unique id * @coord: The access coordinates written back out for the generic port. * Expect 2 levels array. * * Return: 0 on success. Errno on failure. * * Only supports device handles that are ACPI. Assume ACPI0016 HID for CXL.
*/ int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord)
{ struct memory_target *target;
guard(mutex)(&target_lock);
target = acpi_find_genport_target(uid); if (!target) return -ENOENT;
target = alloc_target(mem_pxm); if (!target) return;
/* * There are potentially multiple ranges per PXM, so record each * in the per-target memregions resource tree.
*/ if (!__request_region(&target->memregions, start, len, "memory target",
IORESOURCE_MEM))
pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
start, start + len, mem_pxm);
}
static __init constchar *hmat_data_type(u8 type)
{ switch (type) { case ACPI_HMAT_ACCESS_LATENCY: return"Access Latency"; case ACPI_HMAT_READ_LATENCY: return"Read Latency"; case ACPI_HMAT_WRITE_LATENCY: return"Write Latency"; case ACPI_HMAT_ACCESS_BANDWIDTH: return"Access Bandwidth"; case ACPI_HMAT_READ_BANDWIDTH: return"Read Bandwidth"; case ACPI_HMAT_WRITE_BANDWIDTH: return"Write Bandwidth"; default: return"Reserved";
}
}
static __init constchar *hmat_data_type_suffix(u8 type)
{ switch (type) { case ACPI_HMAT_ACCESS_LATENCY: case ACPI_HMAT_READ_LATENCY: case ACPI_HMAT_WRITE_LATENCY: return" nsec"; case ACPI_HMAT_ACCESS_BANDWIDTH: case ACPI_HMAT_READ_BANDWIDTH: case ACPI_HMAT_WRITE_BANDWIDTH: return" MB/s"; default: return"";
}
}
/* * Check for invalid and overflow values
*/ if (entry == 0xffff || !entry) return 0; elseif (base > (UINT_MAX / (entry))) return 0;
	/*
	 * Divide by the base unit for version 1; convert latency from
	 * picoseconds to nanoseconds if revision 2.
	 */
value = entry * base; if (hmat_revision == 1) { if (value < 10) return 0;
value = DIV_ROUND_UP(value, 10);
} elseif (hmat_revision == 2) { switch (type) { case ACPI_HMAT_ACCESS_LATENCY: case ACPI_HMAT_READ_LATENCY: case ACPI_HMAT_WRITE_LATENCY:
value = DIV_ROUND_UP(value, 1000); break; default: break;
}
} return value;
}
/*
 * hmat_update_target_access - record one HMAT performance value in the
 * target's access coordinates.
 * @target: memory target whose coordinates are updated
 * @type:   ACPI_HMAT_* data type of @value
 * @value:  normalized performance value
 * @access: index into target->coord[] selecting the access class
 *
 * The combined "access" types set both the read and write members; the
 * directional types set only their own member. Unknown types are ignored.
 */
static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value, int access)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->coord[access].read_latency = value;
		target->coord[access].write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->coord[access].read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->coord[access].write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->coord[access].read_bandwidth = value;
		target->coord[access].write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->coord[access].read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->coord[access].write_bandwidth = value;
		break;
	default:
		break;
	}
}
int hmat_update_target_coordinates(int nid, struct access_coordinate *coord, enum access_coordinate_class access)
{ struct memory_target *target; int pxm;
switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) { case ACPI_HMAT_CA_DIRECT_MAPPED:
tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP; /* Extended Linear mode is only valid if cache is direct mapped */ if (cache->address_mode == ACPI_HMAT_CACHE_MODE_EXTENDED_LINEAR) {
tcache->cache_attrs.address_mode =
NODE_CACHE_ADDR_MODE_EXTENDED_LINEAR;
} break; case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
tcache->cache_attrs.indexing = NODE_CACHE_INDEXED; break; case ACPI_HMAT_CA_NONE: default:
tcache->cache_attrs.indexing = NODE_CACHE_OTHER; break;
}
switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) { case ACPI_HMAT_CP_WB:
tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK; break; case ACPI_HMAT_CP_WT:
tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH; break; case ACPI_HMAT_CP_NONE: default:
tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER; break;
}
list_add_tail(&tcache->node, &target->caches);
staticvoid hmat_update_target_attrs(struct memory_target *target, unsignedlong *p_nodes, int access)
{ struct memory_initiator *initiator; unsignedint cpu_nid; struct memory_locality *loc = NULL;
u32 best = 0; int i;
/* Don't update if an external agent has changed the data. */ if (target->ext_updated) return;
/* Don't update for generic port if there's no device handle */ if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
!(*(u16 *)target->gen_port_device_handle)) return;
bitmap_zero(p_nodes, MAX_NUMNODES); /* * If the Address Range Structure provides a local processor pxm, set * only that one. Otherwise, find the best performance attributes and * collect all initiators that match.
*/ if (target->processor_pxm != PXM_INVAL) {
cpu_nid = pxm_to_node(target->processor_pxm); if (access == ACCESS_COORDINATE_LOCAL ||
node_state(cpu_nid, N_CPU)) {
set_bit(target->processor_pxm, p_nodes); return;
}
}
if (list_empty(&localities)) return;
/* * We need the initiator list sorted so we can use bitmap_clear for * previously set initiators when we find a better memory accessor. * We'll also use the sorting to prime the candidate nodes with known * initiators.
*/
list_sort(NULL, &initiators, initiator_cmp); if (initiators_to_nodemask(p_nodes) < 0) return;
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
loc = localities_types[i]; if (!loc) continue;
best = 0;
list_for_each_entry(initiator, &initiators, node) {
u32 value;
if ((access == ACCESS_COORDINATE_CPU ||
access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
!initiator->has_cpu) {
clear_bit(initiator->processor_pxm, p_nodes); continue;
} if (!test_bit(initiator->processor_pxm, p_nodes)) continue;
value = hmat_initiator_perf(target, initiator, loc->hmat_loc); if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
bitmap_clear(p_nodes, 0, initiator->processor_pxm); if (value != best)
clear_bit(initiator->processor_pxm, p_nodes);
} if (best)
hmat_update_target_access(target, loc->hmat_loc->data_type, best, access);
}
}
staticvoid __hmat_register_target_initiators(struct memory_target *target, unsignedlong *p_nodes, int access)
{ unsignedint mem_nid, cpu_nid; int i;
/* * Do not bother creating devices if no driver is available to * consume them.
*/ if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM)) return;
for (res = target->memregions.child; res; res = res->sibling) { int target_nid = pxm_to_node(target->memory_pxm);
hmem_register_resource(target_nid, res);
}
}
staticvoid hmat_hotplug_target(struct memory_target *target)
{ int nid = pxm_to_node(target->memory_pxm);
	/*
	 * Skip offline nodes. This can happen when memory marked EFI_MEMORY_SP,
	 * "specific purpose", is applied to all the memory in a proximity
	 * domain leading to the node being marked offline / unplugged, or if a
	 * memory-only "hotplug" node is offline.
*/ if (nid == NUMA_NO_NODE || !node_online(nid)) return;
guard(mutex)(&target_lock); if (target->registered) return;
staticvoid hmat_register_target(struct memory_target *target)
{ /* * Devices may belong to either an offline or online * node, so unconditionally add them.
*/
hmat_register_target_devices(target);
/* * Register generic port perf numbers. The nid may not be * initialized and is still NUMA_NO_NODE.
*/
mutex_lock(&target_lock); if (*(u16 *)target->gen_port_device_handle) {
hmat_update_generic_target(target);
target->registered = true;
}
mutex_unlock(&target_lock);
/*
 * (Extraction artifact — stray non-source text, translated and commented
 * out so it cannot break the build:)
 * The information on this website was compiled carefully to the best of our
 * knowledge. However, neither completeness, correctness, nor quality of the
 * provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental.
 */