/*
 * NOTE(review): fragment of an HPA restore helper — the enclosing function
 * declaration is outside this chunk; do not assume the surrounding contract.
 *
 * No xormaps for host bridge interleave ways of 1 or 3.
 */
if (hbiw == 1 || hbiw == 3) return hpa;
/*
 * For root decoders using xormaps (hbiw: 2,4,6,8,12,16) restore
 * the position bit to its value before the xormap was applied at
 * HPA->DPA translation.
 *
 * pos is the lowest set bit in an XORMAP
 * val is the XORALLBITS(HPA & XORMAP)
 *
 * XORALLBITS: The CXL spec (3.1 Table 9-22) defines XORALLBITS
 * as an operation that outputs a single bit by XORing all the
 * bits in the input (hpa & xormap). Implement XORALLBITS using
 * hweight64(). If the hamming weight is even the XOR of those
 * bits results in val==0, if odd the XOR result is val==1.
 */
for (int i = 0; i < cximsd->nr_maps; i++) {
	/* An all-zero map contributes no position bit */
	if (!cximsd->xormaps[i]) continue;
	pos = __ffs(cximsd->xormaps[i]);
	val = (hweight64(hpa & cximsd->xormaps[i]) & 1);
	/* Clear bit 'pos' and substitute the recomputed XORALLBITS value */
	hpa = (hpa & ~(1ULL << pos)) | (val << pos);
}
/*
 * NOTE(review): fragment of a CXIMS parse callback — enclosing function
 * head is outside this chunk.
 */
rc = eig_to_granularity(cxims->hbig, &hbig);
if (rc)
	return rc;
/* Does this CXIMS entry apply to the given CXL Window? */
if (hbig != cxld->interleave_granularity)
	return 0;
/* IW 1,3 do not use xormaps and skip this parsing entirely */
if (is_power_of_2(cxld->interleave_ways))
	/* 2, 4, 8, 16 way */
	nr_maps = ilog2(cxld->interleave_ways);
else
	/* 6, 12 way */
	nr_maps = ilog2(cxld->interleave_ways / 3);
/*
 * NOTE(review): tail of a CFMWS length-validation helper — enclosing
 * function head is outside this chunk.
 *
 * A CFMWS entry shorter than expected is malformed and rejected.
 */
if (cfmws->header.length < expected_len) {
	dev_err(dev, "CFMWS length %d less than expected %d\n",
		cfmws->header.length, expected_len);
	return -EINVAL;
}
/* Longer-than-expected entries are tolerated (future spec growth); just log */
if (cfmws->header.length > expected_len)
	dev_dbg(dev, "CFMWS length %d greater than expected %d\n",
		cfmws->header.length, expected_len);
return 0;
}
/*
 * Context handed to the CFMWS sub-table walker.
 *
 * Note, @dev must be the first member, see 'struct cxl_chbs_context'
 * and mock_acpi_table_parse_cedt()
 */
struct cxl_cfmws_context {
	struct device *dev;		/* device used for logging/devm */
	struct cxl_port *root_port;	/* CXL root port decoders attach to */
	struct resource *cxl_res;	/* private tree of CXL window resources */
	int id;				/* running index for parsed windows */
};
/**
 * cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
 * @handle: ACPI handle
 * @coord: performance access coordinates
 * @entries: number of QTG IDs to return
 * @qos_class: int array provided by caller to return QTG IDs
 *
 * Return: number of QTG IDs returned, or -errno for errors
 *
 * Issue QTG _DSM with accompanied bandwidth and latency data in order to get
 * the QTG IDs that are suitable for the performance point in order of most
 * suitable to least suitable. Write back array of QTG IDs and return the
 * actual number of QTG IDs written back.
 */
/* NOTE(review): 'staticint' appears to be paste-lost whitespace for 'static int' — confirm against upstream */
staticint
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
			  int entries, int *qos_class)
{
	union acpi_object *out_obj, *out_buf, *obj;
	/* _DSM inputs: read/write latency then read/write bandwidth, as integers */
	union acpi_object in_array[4] = {
		[0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
		[1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
		[2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
		[3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
	};
	/* The four integers are passed as one ACPI package argument */
	union acpi_object in_obj = {
		.package = {
			.type = ACPI_TYPE_PACKAGE,
			.count = 4,
			.elements = in_array,
		},
	};
	int count, pkg_entries, i;
	u16 max_qtg;
	int rc;
	/* NOTE(review): function body continues beyond this chunk */
/*
 * NOTE(review): tail of a cache-size setup helper — enclosing function
 * head is outside this chunk.
 */
res = DEFINE_RES_MEM(start, size);
nid = phys_to_target_node(start);
/* Ask HMAT for the extended linear cache size covering this window */
rc = hmat_get_extended_linear_cache_size(&res, nid, &cache_size);
if (rc)
	return rc;
/*
 * The cache range is expected to be within the CFMWS.
 * Currently there is only support cache_size == cxl_size. CXL
 * size is then half of the total CFMWS window size.
 */
size = size >> 1;
if (cache_size && size != cache_size) {
	dev_warn(&cxld->dev, "Extended Linear Cache size %pa != CXL size %pa. No Support!",
		 &cache_size, &size);
	return -ENXIO;
}
cxlrd->cache_size = cache_size;
return 0;
}
/*
 * Best-effort extended linear cache sizing; failures other than
 * -EOPNOTSUPP are logged but never propagated.
 *
 * NOTE(review): 'staticvoid' appears to be paste-lost whitespace for
 * 'static void', and the function's closing brace is not visible in
 * this chunk — confirm against upstream.
 */
staticvoid cxl_setup_extended_linear_cache(struct cxl_root_decoder *cxlrd)
{
	int rc;

	rc = cxl_acpi_set_cache_size(cxlrd);
	if (!rc)
		return;
	if (rc != -EOPNOTSUPP) {
		/*
		 * Failing to support extended linear cache region resize does not
		 * prevent the region from functioning. Only causes cxl list showing
		 * incorrect region size.
		 */
		dev_warn(cxlrd->cxlsd.cxld.dev.parent,
			 "Extended linear cache calculation failed rc:%d\n", rc);
	}
/*
 * NOTE(review): fragment of a CHBS sub-table iterator — enclosing
 * function head is outside this chunk.
 *
 * Skip entries whose length does not match the length mandated for
 * their declared CXL version.
 */
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11 &&
    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL11)
	return 0;
if (chbs->cxl_version == ACPI_CEDT_CHBS_VERSION_CXL20 &&
    chbs->length != ACPI_CEDT_CHBS_LENGTH_CXL20)
	return 0;
/* Entries without a register base carry no usable information */
if (!chbs->base)
	return 0;
if (ctx->saved_version != chbs->cxl_version) {
	/*
	 * cxl_version cannot be overwritten before the next two
	 * checks, then use saved_version
	 */
	ctx->saved_version = chbs->cxl_version;
	ctx->nr_versions++;
}
if (ctx->nr_versions > 1) {
	/*
	 * Disclaim eRCD support given some component register may
	 * only be found via CHBCR
	 */
	dev_info(dev, "Unsupported platform config, mixed Virtual Host and Restricted CXL Host hierarchy.");
}
/*
 * NOTE(review): tail of a host-bridge dport registration function —
 * enclosing function head is outside this chunk.
 *
 * In RCH mode, bind the component regs base to the dport. In
 * VH mode it will be bound to the CXL host bridge's port
 * object later in add_host_bridge_uport().
 */
if (ctx.cxl_version == ACPI_CEDT_CHBS_VERSION_CXL11) {
	dev_dbg(match, "RCRB found for UID %lld: %pa\n", ctx.uid,
		&ctx.base);
	dport = devm_cxl_add_rch_dport(root_port, bridge, ctx.uid,
				       ctx.base);
} else {
	dport = devm_cxl_add_dport(root_port, bridge, ctx.uid,
				   CXL_RESOURCE_NONE);
}
if (IS_ERR(dport))
	return PTR_ERR(dport);
/* Perf coordinates are advisory; a failure here is only debug-logged */
ret = get_genport_coordinates(match, dport);
if (ret)
	dev_dbg(match, "Failed to get generic port perf coordinates.\n");
return 0;
}
/*
 * A host bridge is a dport to a CFMWS decode and it is a uport to the
 * dport (PCIe Root Ports) in the host bridge.
 *
 * NOTE(review): 'staticint' appears to be paste-lost whitespace for
 * 'static int', and the function body continues beyond this chunk —
 * confirm against upstream.
 */
staticint add_host_bridge_uport(struct device *match, void *arg)
{
	struct cxl_port *root_port = arg;
	struct device *host = root_port->dev.parent;
	struct acpi_device *hb = to_cxl_host_bridge(host, match);
	struct acpi_pci_root *pci_root;
	struct cxl_dport *dport;
	struct cxl_port *port;
	struct device *bridge;
	struct cxl_chbs_context ctx;
	resource_size_t component_reg_phys;
	int rc;

	/* @match is not a CXL host bridge — nothing to do */
	if (!hb)
		return 0;
	pci_root = acpi_pci_find_root(hb->handle);
	bridge = pci_root->bus->bridge;
	/* The dport side must already be registered for this bridge */
	dport = cxl_find_dport_by_dev(root_port, bridge);
	if (!dport) {
		dev_dbg(host, "host bridge expected and not found\n");
		return 0;
	}
/*
 * NOTE(review): tail of a resource-teardown helper — enclosing function
 * head is outside this chunk.
 */
for (res = cxl->child; res; res = next) {
	struct resource *victim = cxl_get_public_resource(res);

	/* Capture the sibling before this node is unlinked from the tree */
	next = res->sibling;
	remove_resource(res);
	if (victim) {
		/* Drop the paired public iomem_resource entry as well */
		remove_resource(victim);
		kfree(victim);
	}
	del_cxl_resource(res);
}
}
/**
 * add_cxl_resources() - reflect CXL fixed memory windows in iomem_resource
 * @cxl_res: A standalone resource tree where each CXL window is a sibling
 *
 * Walk each CXL window in @cxl_res and add it to iomem_resource potentially
 * expanding its boundaries to ensure that any conflicting resources become
 * children. If a window is expanded it may then conflict with a another window
 * entry and require the window to be truncated or trimmed. Consider this
 * situation::
 *
 *	|-- "CXL Window 0" --||----- "CXL Window 1" -----|
 *	|--------------- "System RAM" -------------|
 *
 * ...where platform firmware has established as System RAM resource across 2
 * windows, but has left some portion of window 1 for dynamic CXL region
 * provisioning. In this case "Window 0" will span the entirety of the "System
 * RAM" span, and "CXL Window 1" is truncated to the remaining tail past the end
 * of that "System RAM" resource.
 */
/* NOTE(review): 'staticint' appears to be paste-lost whitespace for 'static int'; body continues beyond this chunk */
staticint add_cxl_resources(struct resource *cxl_res)
{
	struct resource *res, *new, *next;

	for (res = cxl_res->child; res; res = next) {
		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		/* Mirror the private window into a public resource entry */
		new->name = res->name;
		new->start = res->start;
		new->end = res->end;
		new->flags = IORESOURCE_MEM;
		new->desc = IORES_DESC_CXL;
		/*
		 * Record the public resource in the private cxl_res tree for
		 * later removal.
		 */
		cxl_set_public_resource(res, new);
/*
 * NOTE(review): tail of the ACPI probe routine — enclosing function
 * head is outside this chunk.
 */
rc = add_cxl_resources(cxl_res);
if (rc)
	return rc;
/*
 * Populate the root decoders with their related iomem resource,
 * if present
 */
device_for_each_child(&root_port->dev, cxl_res, pair_cxl_resource);
/*
 * Root level scanned with host-bridge as dports, now scan host-bridges
 * for their role as CXL uports to their CXL-capable PCIe Root Ports.
 */
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
		      add_host_bridge_uport);
if (rc < 0)
	return rc;
/* Optional: register the nvdimm bridge when CXL_PMEM is enabled */
if (IS_ENABLED(CONFIG_CXL_PMEM))
	rc = device_for_each_child(&root_port->dev, root_port,
				   add_root_nvdimm_bridge);
if (rc < 0)
	return rc;
/* In case PCI is scanned before ACPI re-trigger memdev attach */
cxl_bus_rescan();
return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.