// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016, Semihalf * Author: Tomasz Nowicki <tn@semihalf.com> * * This file implements early detection/parsing of I/O mapping * reported to OS through firmware via I/O Remapping Table (IORT) * IORT document number: ARM DEN 0049A
*/
/** * iort_set_fwnode() - Create iort_fwnode and use it to register * iommu data in the iort_fwnode_list * * @iort_node: IORT table node associated with the IOMMU * @fwnode: fwnode associated with the IORT node * * Returns: 0 on success * <0 on failure
*/ staticinlineint iort_set_fwnode(struct acpi_iort_node *iort_node, struct fwnode_handle *fwnode)
{ struct iort_fwnode *np;
/** * iort_register_domain_token() - register domain token along with related * ITS ID and base address to the list from where we can get it back later on. * @trans_id: ITS ID. * @base: ITS base address. * @fw_node: Domain token. * * Returns: 0 on success, -ENOMEM if no memory when allocating list element
*/ int iort_register_domain_token(int trans_id, phys_addr_t base, struct fwnode_handle *fw_node)
{ struct iort_its_msi_chip *its_msi_chip;
its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL); if (!its_msi_chip) return -ENOMEM;
/** * iort_find_domain_token() - Find domain token based on given ITS ID * @trans_id: ITS ID. * * Returns: domain token when find on the list, NULL otherwise
*/ struct fwnode_handle *iort_find_domain_token(int trans_id)
{ struct fwnode_handle *fw_node = NULL; struct iort_its_msi_chip *its_msi_chip;
/* Get the first IORT node */
iort = (struct acpi_table_iort *)iort_table;
iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
iort->node_offset);
iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
iort_table->length);
for (i = 0; i < iort->node_count; i++) { if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, "IORT node pointer overflows, bad table!\n")) return NULL;
if (iort_node->type == type &&
ACPI_SUCCESS(callback(iort_node, context))) return iort_node;
/* * Walk the device tree to find a device with an * ACPI companion; there is no point in scanning * IORT for a device matching a named component if * the device does not have an ACPI companion to * start with.
*/ do {
adev = ACPI_COMPANION(nc_dev); if (adev) break;
nc_dev = nc_dev->parent;
} while (nc_dev);
if (!adev) goto out;
status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf); if (ACPI_FAILURE(status)) {
dev_warn(nc_dev, "Can't get device full path name\n"); goto out;
}
bus = to_pci_bus(dev);
pci_rc = (struct acpi_iort_root_complex *)node->node_data;
/* * It is assumed that PCI segment numbers maps one-to-one * with root complexes. Each segment number can represent only * one root complex.
*/
status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
AE_OK : AE_NOT_FOUND;
}
out: return status;
}
staticint iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
u32 *rid_out, bool check_overlap)
{ /* Single mapping does not care for input id */ if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) { if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
*rid_out = map->output_base; return 0;
}
pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
map, type); return -ENXIO;
}
if (check_overlap) { /* * We already found a mapping for this input ID at the end of * another region. If it coincides with the start of this * region, we assume the prior match was due to the off-by-1 * issue mentioned below, and allow it to be superseded. * Otherwise, things are *really* broken, and we just disregard * duplicate matches entirely to retain compatibility.
*/
pr_err(FW_BUG "[map %p] conflicting mapping for input ID 0x%x\n",
map, rid_in); if (rid_in != map->input_base) return -ENXIO;
/* * Due to confusion regarding the meaning of the id_count field (which * carries the number of IDs *minus 1*), we may have to disregard this * match if it is at the end of the range, and overlaps with the start * of another one.
*/ if (map->id_count > 0 && rid_in == map->input_base + map->id_count) return -EAGAIN; return 0;
}
switch (node->type) { case ACPI_IORT_NODE_SMMU_V3: /* * SMMUv3 dev ID mapping index was introduced in revision 1 * table, not available in revision 0
*/ if (node->revision < 1) return -EINVAL;
smmu = (struct acpi_iort_smmu_v3 *)node->node_data; /* * Until IORT E.e (node rev. 5), the ID mapping index was * defined to be valid unless all interrupts are GSIV-based.
*/ if (node->revision < 5) { if (smmu->event_gsiv && smmu->pri_gsiv &&
smmu->gerr_gsiv && smmu->sync_gsiv) return -EINVAL;
} elseif (!(smmu->flags & ACPI_IORT_SMMU_V3_DEVICEID_VALID)) { return -EINVAL;
}
if (smmu->id_mapping_index >= node->mapping_count) {
pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
node, node->type); return -EINVAL;
}
return smmu->id_mapping_index; case ACPI_IORT_NODE_PMCG:
pmcg = (struct acpi_iort_pmcg *)node->node_data; if (pmcg->overflow_gsiv || node->mapping_count == 0) return -EINVAL;
/* Parse the ID mapping tree to find specified node type */ while (node) { struct acpi_iort_id_mapping *map; int i, index, rc = 0;
u32 out_ref = 0, map_id = id;
if (IORT_TYPE_MASK(node->type) & type_mask) { if (id_out)
*id_out = id; return node;
}
if (!node->mapping_offset || !node->mapping_count) goto fail_map;
/* Firmware bug! */ if (!map->output_reference) {
pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
node, node->type); goto fail_map;
}
/* * Get the special ID mapping index (if any) and skip its * associated ID map to prevent erroneous multi-stage * IORT ID translations.
*/
index = iort_get_id_mapping_index(node);
/* Do the ID translation */ for (i = 0; i < node->mapping_count; i++, map++) { /* if it is special mapping index, skip it */ if (i == index) continue;
rc = iort_id_map(map, node->type, map_id, &id, out_ref); if (!rc) break; if (rc == -EAGAIN)
out_ref = map->output_reference;
}
if (i == node->mapping_count && !out_ref) goto fail_map;
/* step 1: retrieve the initial dev id */
parent = iort_node_get_id(node, &id, index); if (!parent) return NULL;
/* * optional step 2: map the initial dev id if its parent is not * the target type we want, map it again for the use cases such * as NC (named component) -> SMMU -> ITS. If the type is matched, * return the initial dev id and its parent pointer directly.
*/ if (!(IORT_TYPE_MASK(parent->type) & type_mask))
parent = iort_node_map_id(parent, id, id_out, type_mask); else if (id_out)
*id_out = id;
if (!dev_is_pci(dev)) { struct acpi_iort_node *node; /* * scan iort_fwnode_list to see if it's an iort platform * device (such as SMMU, PMCG),its iort node already cached * and associated with fwnode when iort platform devices * were initialized.
*/
node = iort_get_iort_node(dev->fwnode); if (node) return node; /* * if not, then it should be a platform device defined in * DSDT/SSDT (with Named Component node in IORT)
*/ return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev);
}
/** * iort_msi_map_id() - Map a MSI input ID for a device * @dev: The device for which the mapping is to be done. * @input_id: The device input ID. * * Returns: mapped MSI ID on success, input ID otherwise
*/
u32 iort_msi_map_id(struct device *dev, u32 input_id)
{ struct acpi_iort_node *node;
u32 dev_id;
node = iort_find_dev_node(dev); if (!node) return input_id;
/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 for successful find a dev id, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
	struct acpi_iort_node *node;
	int index, i;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENODEV;

	index = iort_get_id_mapping_index(node);

	/* A valid mapping index resolves the dev ID directly. */
	if (index >= 0) {
		if (iort_node_get_id(node, dev_id, index))
			return 0;
		return -ENODEV;
	}

	/* Otherwise walk every ID mapping until one resolves. */
	for (i = 0; i < node->mapping_count; i++) {
		if (iort_node_map_platform_id(node, dev_id,
					      IORT_MSI_TYPE, i))
			return 0;
	}

	return -ENODEV;
}
/*
 * iort_find_its_base() - Look up the base address of the ITS with @its_id.
 * @its_id: ITS translation ID to search for.
 * @base: Out parameter; set to the ITS base address on success.
 *
 * Scans iort_msi_chip_list under iort_msi_chip_lock; @base is written
 * only when a matching entry is found.
 *
 * Returns: 0 on success, -ENODEV if no matching ITS is registered.
 */
static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
	struct iort_its_msi_chip *its_msi_chip;
	int ret = -ENODEV;

	spin_lock(&iort_msi_chip_lock);
	list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
		if (its_msi_chip->translation_id == its_id) {
			*base = its_msi_chip->base_addr;
			ret = 0;
			break;
		}
	}
	spin_unlock(&iort_msi_chip_lock);

	return ret;
}
/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @id: Device's ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 id,
				unsigned int idx, int *its_id)
{
	struct acpi_iort_its_group *its;
	struct acpi_iort_node *node;

	node = iort_find_dev_node(dev);
	if (!node)
		return -ENXIO;

	/* Map the device ID up to its MSI (ITS group) parent node. */
	node = iort_node_map_id(node, id, NULL, IORT_MSI_TYPE);
	if (!node)
		return -ENXIO;

	/* Move to ITS specific data */
	its = (struct acpi_iort_its_group *)node->node_data;
	if (idx >= its->its_count) {
		dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
			idx, its->its_count);
		return -ENXIO;
	}

	*its_id = its->identifiers[idx];
	return 0;
}
/** * iort_get_device_domain() - Find MSI domain related to a device * @dev: The device. * @id: Requester ID for the device. * @bus_token: irq domain bus token. * * Returns: the MSI domain for this device, NULL otherwise
*/ struct irq_domain *iort_get_device_domain(struct device *dev, u32 id, enum irq_domain_bus_token bus_token)
{ struct fwnode_handle *handle; int its_id;
if (iort_dev_find_its_id(dev, id, 0, &its_id)) return NULL;
handle = iort_find_domain_token(its_id); if (!handle) return NULL;
/*
 * NOTE(review): everything from here to the closing brace does not
 * belong to iort_get_device_domain(): `msi_parent`, `its`, `iort_fwnode`
 * and `domain` are undeclared in this scope, and the bare `return;`
 * statements are invalid in a function returning a pointer. These lines
 * look like the tail of a separate void helper (one that resolves a
 * platform-MSI irq domain and attaches it to @dev) whose signature was
 * lost when this file was assembled -- TODO restore the missing function
 * boundary (and this function's final return) from the upstream source.
 */
if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP) return;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)msi_parent->node_data;
iort_fwnode = iort_find_domain_token(its->identifiers[0]); if (!iort_fwnode) return;
domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI); if (domain)
dev_set_msi_domain(dev, domain);
}
/** * iort_get_platform_device_domain() - Find MSI domain related to a * platform device * @dev: the dev pointer associated with the platform device * * Returns: the MSI domain for this device, NULL otherwise
*/ staticstruct irq_domain *iort_get_platform_device_domain(struct device *dev)
{ struct acpi_iort_node *node, *msi_parent = NULL; struct fwnode_handle *iort_fwnode; struct acpi_iort_its_group *its; int i;
/* find its associated iort node */
node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
iort_match_node_callback, dev); if (!node) return NULL;
/* then find its msi parent node */ for (i = 0; i < node->mapping_count; i++) {
msi_parent = iort_node_map_platform_id(node, NULL,
IORT_MSI_TYPE, i); if (msi_parent) break;
}
if (!msi_parent) return NULL;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)msi_parent->node_data;
iort_fwnode = iort_find_domain_token(its->identifiers[0]); if (!iort_fwnode) return NULL;
/* * Make sure the kernel has preserved the boot firmware PCIe * configuration. This is required to ensure that the RMR PCIe * StreamIDs are still valid (Refer: ARM DEN 0049E.d Section 3.1.1.5).
*/ if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
if (!host->preserve_config) returnfalse;
}
for (i = 0; i < fwspec->num_ids; i++) { if (fwspec->ids[i] >= id_start &&
fwspec->ids[i] <= id_start + id_count) returntrue;
}
/* * Go through the ID mappings and see if we have a match for SMMU * and dev(if !NULL). If found, get the sids for the Node. * Please note, id_count is equal to the number of IDs in the * range minus one.
*/ for (i = 0; i < node->mapping_count; i++, map++) { struct acpi_iort_node *parent;
for (i = 0; i < iort->node_count; i++) { if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND, "IORT node pointer overflows, bad table!\n")) return;
if (iort_node->type == ACPI_IORT_NODE_RMR)
iort_node_get_rmr_info(iort_node, iommu, dev, head);
/* * Populate the RMR list associated with a given IOMMU and dev(if provided). * If dev is NULL, the function populates all the RMRs associated with the * given IOMMU.
*/ staticvoid iort_iommu_rmr_get_resv_regions(struct fwnode_handle *iommu_fwnode, struct device *dev, struct list_head *head)
{ struct acpi_iort_node *iommu;
iommu = iort_get_iort_node(iommu_fwnode); if (!iommu) return;
/* * Retrieve platform specific HW MSI reserve regions. * The ITS interrupt translation spaces (ITS_base + SZ_64K, SZ_64K) * associated with the device are the HW MSI reserved regions.
*/ staticvoid iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct acpi_iort_its_group *its; struct acpi_iort_node *iommu_node, *its_node = NULL; int i;
iommu_node = iort_get_msi_resv_iommu(dev); if (!iommu_node) return;
/* * Current logic to reserve ITS regions relies on HW topologies * where a given PCI or named component maps its IDs to only one * ITS group; if a PCI or named component can map its IDs to * different ITS groups through IORT mappings this function has * to be reworked to ensure we reserve regions for all ITS groups * a given PCI or named component may map IDs to.
*/
for (i = 0; i < fwspec->num_ids; i++) {
its_node = iort_node_map_id(iommu_node,
fwspec->ids[i],
NULL, IORT_MSI_TYPE); if (its_node) break;
}
if (!its_node) return;
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)its_node->node_data;
for (i = 0; i < its->its_count; i++) {
phys_addr_t base;
if (!iort_find_its_base(its->identifiers[i], &base)) { int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; struct iommu_resv_region *region;
region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
prot, IOMMU_RESV_MSI,
GFP_KERNEL); if (region)
list_add_tail(®ion->list, head);
}
}
}
/** * iort_iommu_get_resv_regions - Generic helper to retrieve reserved regions. * @dev: Device from iommu_get_resv_regions() * @head: Reserved region list from iommu_get_resv_regions()
*/ void iort_iommu_get_resv_regions(struct device *dev, struct list_head *head)
{ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
staticinlinebool iort_iommu_driver_enabled(u8 type)
{ switch (type) { case ACPI_IORT_NODE_SMMU_V3: return IS_ENABLED(CONFIG_ARM_SMMU_V3); case ACPI_IORT_NODE_SMMU: return IS_ENABLED(CONFIG_ARM_SMMU); default:
pr_warn("IORT node type %u does not describe an SMMU\n", type); returnfalse;
}
}
/*
 * arm_smmu_v3_count_resources() - Count resources an SMMUv3 node needs.
 * @node: IORT node of type ACPI_IORT_NODE_SMMU_V3.
 *
 * Returns: 1 (for the always-present MMIO region) plus one IRQ resource
 * for each wired (GSIV) interrupt the firmware describes.
 */
static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
	struct acpi_iort_smmu_v3 *smmu;
	/* Always present mem resource */
	int num_res = 1;

	/* Retrieve SMMUv3 specific data */
	smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (smmu->event_gsiv)
		num_res++;

	if (smmu->pri_gsiv)
		num_res++;

	if (smmu->gerr_gsiv)
		num_res++;

	if (smmu->sync_gsiv)
		num_res++;

	return num_res;
}
staticbool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{ /* * Cavium ThunderX2 implementation doesn't not support unique * irq line. Use single irq line for all the SMMUv3 interrupts.
*/ if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) returnfalse;
/* * ThunderX2 doesn't support MSIs from the SMMU, so we're checking * SPI numbers here.
*/ return smmu->event_gsiv == smmu->pri_gsiv &&
smmu->event_gsiv == smmu->gerr_gsiv &&
smmu->event_gsiv == smmu->sync_gsiv;
}
staticunsignedlong arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{ /* * Override the size, for Cavium ThunderX2 implementation * which doesn't support the page 1 SMMU register space.
*/ if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) return SZ_64K;
/*
 * NOTE(review): everything from here to the closing brace does not
 * belong to arm_smmu_v3_resource_size(): `node` is undeclared in this
 * scope, the cast targets `struct acpi_iort_smmu` (SMMUv1/v2 node data,
 * not v3), and the final return computes an interrupt/resource count,
 * not a size. These lines look like the tail of a separate SMMU
 * resource-count helper whose signature was lost when this file was
 * assembled. This function's own non-ThunderX2 return path is also
 * missing -- TODO restore both function bodies from the upstream source.
 */
/* Retrieve SMMU specific data */
smmu = (struct acpi_iort_smmu *)node->node_data;
/* * Only consider the global fault interrupt and ignore the * configuration access interrupt. * * MMIO address and global fault interrupt resources are always * present so add them to the context interrupt count as a static * value.
*/ return smmu->context_interrupt_count + 2;
}
/* Retrieve PMCG specific data */
pmcg = (struct acpi_iort_pmcg *)node->node_data;
res[0].start = pmcg->page0_base_address;
res[0].end = pmcg->page0_base_address + SZ_4K - 1;
res[0].flags = IORESOURCE_MEM; /* * The initial version in DEN0049C lacked a way to describe register * page 1, which makes it broken for most PMCG implementations; in * that case, just let the driver fail gracefully if it expects to * find a second memory resource.
*/ if (node->revision > 0) {
res[1].start = pmcg->page1_base_address;
res[1].end = pmcg->page1_base_address + SZ_4K - 1;
res[1].flags = IORESOURCE_MEM;
}
if (pmcg->overflow_gsiv)
acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
ACPI_EDGE_SENSITIVE, &res[2]);
}
static __init conststruct iort_dev_config *iort_get_dev_cfg( struct acpi_iort_node *node)
{ switch (node->type) { case ACPI_IORT_NODE_SMMU_V3: return &iort_arm_smmu_v3_cfg; case ACPI_IORT_NODE_SMMU: return &iort_arm_smmu_cfg; case ACPI_IORT_NODE_PMCG: return &iort_arm_smmu_v3_pmcg_cfg; default: return NULL;
}
}
/** * iort_add_platform_device() - Allocate a platform device for IORT node * @node: Pointer to device ACPI IORT node * @ops: Pointer to IORT device config struct * * Returns: 0 on success, <0 failure
*/ staticint __init iort_add_platform_device(struct acpi_iort_node *node, conststruct iort_dev_config *ops)
{ struct fwnode_handle *fwnode; struct platform_device *pdev; struct resource *r; int ret, count;
pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO); if (!pdev) return -ENOMEM;
if (ops->dev_set_proximity) {
ret = ops->dev_set_proximity(&pdev->dev, node); if (ret) goto dev_put;
}
count = ops->dev_count_resources(node);
r = kcalloc(count, sizeof(*r), GFP_KERNEL); if (!r) {
ret = -ENOMEM; goto dev_put;
}
ops->dev_init_resources(r, node);
ret = platform_device_add_resources(pdev, r, count); /* * Resources are duplicated in platform_device_add_resources, * free their allocated memory
*/
kfree(r);
if (ret) goto dev_put;
/* * Platform devices based on PMCG nodes uses platform_data to * pass the hardware model info to the driver. For others, add * a copy of IORT node pointer to platform_data to be used to * retrieve IORT data information.
*/ if (ops->dev_add_platdata)
ret = ops->dev_add_platdata(pdev); else
ret = platform_device_add_data(pdev, &node, sizeof(node));
if (ret) goto dev_put;
fwnode = iort_get_fwnode(node);
if (!fwnode) {
ret = -ENODEV; goto dev_put;
}
pdev->dev.fwnode = fwnode;
if (ops->dev_dma_configure)
ops->dev_dma_configure(&pdev->dev, node);
iort_set_device_domain(&pdev->dev, node);
ret = platform_device_add(pdev); if (ret) goto dma_deconfigure;
/*
 * NOTE(review): iort_add_platform_device() ends abruptly above -- the
 * `dma_deconfigure:` and `dev_put:` cleanup labels that the gotos in
 * this function target are missing, as is the success `return 0;`.
 * Everything from here to the closing brace belongs to a different,
 * void init routine (it reads `status`, which is undeclared here, uses
 * a bare `return;` inside an int-returning function, and calls
 * iort_init_platform_devices()). TODO restore the missing function
 * boundary and cleanup labels from the upstream source.
 */
/* iort_table will be used at runtime after the iort init, * so we don't need to call acpi_put_table() to release * the IORT table mapping.
*/
status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table); if (ACPI_FAILURE(status)) { if (status != AE_NOT_FOUND) { constchar *msg = acpi_format_exception(status);
pr_err("Failed to get table, %s\n", msg);
}
return;
}
iort_init_platform_devices();
}
#ifdef CONFIG_ZONE_DMA /* * Extract the highest CPU physical address accessible to all DMA masters in * the system. PHYS_ADDR_MAX is returned when no constrained device is found.
*/
phys_addr_t __init acpi_iort_dma_get_max_cpu_address(void)
{
phys_addr_t limit = PHYS_ADDR_MAX; struct acpi_iort_node *node, *end; struct acpi_table_iort *iort;
acpi_status status; int i;
if (acpi_disabled) return limit;
status = acpi_get_table(ACPI_SIG_IORT, 0,
(struct acpi_table_header **)&iort); if (ACPI_FAILURE(status)) return limit;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.