/*
 * NOTE(review): fragment — the opening of this function (presumably the
 * uTLB-enable path) lies outside this chunk; `mmu`, `utlb` and `domain`
 * are declared there.
 *
 * TODO: Reference-count the microTLB as several bus masters can be
 * connected to the same microTLB.
 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_imuasid_write(mmu, utlb, 0);

	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) |
			   IMUCTR_FLUSH | IMUCTR_MMUEN);

	/* Remember which context this uTLB was bound to. */
	mmu->utlb_ctx[utlb] = domain->context_id;
}
/*
 * NOTE(review): fragment — the opening of this context-setup function
 * (and the declaration of `tmp`) lies outside this chunk.
 *
 * TTBCR
 * We use long descriptors and allocate the whole 32-bit VA space to
 * TTBR0.
 */
	if (domain->mmu->features->twobit_imttbcr_sl0)
		tmp = IMTTBCR_SL0_TWOBIT_LVL_1;
	else
		tmp = IMTTBCR_SL0_LVL_1;

	if (domain->mmu->features->cache_snoop)
		tmp |= IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
		       IMTTBCR_IRGN0_WB_WA;

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write_all(domain, IMCTR,
			    IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);
}
/*
 * ipmmu_domain_init_context - Initialize the page-table configuration for a
 * domain and allocate a hardware translation context for it.
 *
 * Fix: the paste-mangled token "staticint" is restored to "static int".
 *
 * NOTE(review): fragment — this chunk is truncated after the context
 * allocation; the remainder of the function (page-table-ops allocation and
 * context setup) lies outside this chunk.
 */
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = domain->io_domain.pgsize_bitmap;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_flush_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;

	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.coherent_walk = false;
	domain->cfg.iommu_dev = domain->mmu->root->dev;

	/* Find an unused context. */
	ret = ipmmu_domain_allocate_context(domain->mmu->root, domain);
	if (ret < 0)
		return ret;
/*
 * NOTE(review): fragment — appears to belong to the per-domain IRQ handler;
 * `status`, `iova` and `mmu` are declared outside this chunk.
 *
 * Clear the error status flags. Unlike traditional interrupt flag
 * registers that must be cleared by writing 1, this status register
 * seems to require 0. The error address register must be read before,
 * otherwise its value will be 0.
 */
	ipmmu_ctx_write_root(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n",
				    iova);

	/* Only page faults and translation faults are handled below. */
	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev, "Unhandled fault: status 0x%08x iova 0x%lx\n",
			    status, iova);
/*
 * NOTE(review): fragment of the top-level IRQ handler — `i`, `mmu` and
 * `status` are declared outside this chunk.
 *
 * Check interrupts for all active contexts.
 */
	for (i = 0; i < mmu->num_ctx; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}
/*
 * NOTE(review): fragment — the opening of this domain-free function lies
 * outside this chunk.
 *
 * Free the domain resources. We assume that all devices have already
 * been detached.
 */
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}
/*
 * NOTE(review): fragment of an attach-device path — `dev`, `mmu`, `domain`,
 * `ret`, `i` and `fwspec` are declared outside this chunk.
 *
 * Fix: the paste-mangled token "elseif" is restored to "else if".
 */
	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	mutex_lock(&domain->mutex);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
		if (ret < 0) {
			dev_err(dev, "Unable to initialize IPMMU context\n");
			domain->mmu = NULL;
		} else {
			dev_info(dev, "Using IPMMU context %u\n",
				 domain->context_id);
		}
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	mutex_unlock(&domain->mutex);

	if (ret < 0)
		return ret;

	/* Enable the uTLBs for every stream ID attached to this device. */
	for (i = 0; i < fwspec->num_ids; ++i)
		ipmmu_utlb_enable(domain, fwspec->ids[i]);
/* * R-Car Gen3/4 and RZ/G2 use the allow list to opt-in devices. * For Other SoCs, this returns true anyway.
*/ if (!soc_device_match(soc_needs_opt_in)) returntrue;
/* Check whether this SoC can use the IPMMU correctly or not */ if (soc_device_match(soc_denylist)) returnfalse;
/* Check whether this device is a PCI device */ if (dev_is_pci(dev)) returntrue;
/* Check whether this device can work with the IPMMU */ for (i = 0; i < ARRAY_SIZE(devices_allowlist); i++) { if (!strcmp(dev_name(dev), devices_allowlist[i])) returntrue;
}
/* Otherwise, do not allow use of IPMMU */ returnfalse;
}
/*
 * NOTE(review): fragment — the opening of this ARM-mapping setup function
 * (and the declarations of `mmu`, `dev` and `ret`) lies outside this chunk.
 *
 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
 * VAs. This will allocate a corresponding IOMMU domain.
 *
 * TODO:
 * - Create one mapping per context (TLB).
 * - Make the mapping size configurable ? We currently use a 2GB mapping
 *   at a 1GB offset to ensure that NULL VAs will fault.
 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(dev, SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu->mapping)
		arm_iommu_release_mapping(mmu->mapping);
/*
 * NOTE(review): fragment of the platform-device probe function — the
 * allocation of `mmu` and the declarations of `ret` and `irq` lie outside
 * this chunk.
 */
	mmu->dev = &pdev->dev;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
	mmu->features = of_device_get_match_data(&pdev->dev);
	/* Mark every uTLB as unbound until a domain claims it. */
	memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (ret)
		return ret;

	/* Map I/O memory and request IRQ. */
	mmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	if (mmu->features->use_ns_alias_offset)
		mmu->base += IM_NS_ALIAS_OFFSET;

	/*
	 * Determine if this IPMMU instance is a root device by checking for
	 * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
	 */
	if (!mmu->features->has_cache_leaf_nodes ||
	    !of_property_present(pdev->dev.of_node, "renesas,ipmmu-main"))
		mmu->root = mmu;
	else
		mmu->root = ipmmu_find_root();

	/* Wait until the root device has been registered for sure. */
	if (!mmu->root)
		return -EPROBE_DEFER;

	/* Root devices have mandatory IRQs */
	if (ipmmu_is_root(mmu)) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
				       dev_name(&pdev->dev), mmu);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
			return ret;
		}

		ipmmu_device_reset(mmu);

		if (mmu->features->reserved_context) {
			dev_info(&pdev->dev, "IPMMU context 0 is reserved\n");
			set_bit(0, mmu->ctx);
		}
	}

	platform_set_drvdata(pdev, mmu);

	/*
	 * Register the IPMMU to the IOMMU subsystem in the following cases:
	 * - R-Car Gen2 IPMMU (all devices registered)
	 * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device)
	 */
	if (mmu->features->has_cache_leaf_nodes && ipmmu_is_root(mmu))
		return 0;

	ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s",
				     dev_name(&pdev->dev));
	if (ret)
		return ret;

	ret = iommu_device_register(&mmu->iommu, &ipmmu_ops, &pdev->dev);
	if (ret)
		iommu_device_sysfs_remove(&mmu->iommu);
/*
 * NOTE(review): the trailing German website disclaimer ("Die Informationen
 * auf dieser Webseite wurden nach bestem Wissen ...") was copy/paste residue
 * from the hosting web page, not part of the source file, and has been
 * removed.
 */