/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
/* Dummy non-zero value written where the data itself does not matter */
#define QCOM_DUMMY_VAL -1

/* 1 or 2 forces the corresponding translation stage; anything else is ignored */
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");

/* When set, transactions from unattached devices abort instead of bypassing */
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
/*
 * Take a runtime-PM reference on the SMMU's struct device, resuming it if
 * necessary. Returns 0 on success (including when runtime PM is disabled,
 * in which case this is a no-op), or a negative errno from
 * pm_runtime_resume_and_get() on failure.
 */
static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;

	if (!pm_runtime_enabled(dev))
		return 0;

	return pm_runtime_resume_and_get(dev);
}
/*
 * Drop a runtime-PM reference taken by arm_smmu_rpm_get(), going through the
 * autosuspend machinery so that rapid get/put sequences don't bounce the
 * device's power state. No-op when runtime PM is disabled for the device.
 */
static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;

	if (!pm_runtime_enabled(dev))
		return;

	pm_runtime_mark_last_busy(dev);
	__pm_runtime_put_autosuspend(dev);
}
/* Enable runtime-PM autosuspend with a short delay for this SMMU. */
static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;

	/*
	 * Setup an autosuspend delay to avoid bouncing runpm state.
	 * Otherwise, if a driver for a suspended consumer device
	 * unmaps buffers, it will runpm resume/suspend for each one.
	 *
	 * For example, when used by a GPU device, when an application
	 * or game exits, it can trigger unmapping 100s or 1000s of
	 * buffers. With a runpm cycle for each buffer, that adds up
	 * to 5-10sec worth of reprogramming the context bank, while
	 * the system appears to be locked up to the user.
	 */
	pm_runtime_set_autosuspend_delay(dev, 20);
	pm_runtime_use_autosuspend(dev);
}
/* Release a previously-allocated index back into the given bitmap. */
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int delay = 1;

	/* An implementation-specific hook, if present, takes over entirely. */
	if (smmu->impl && unlikely(smmu->impl->tlb_sync))
		return smmu->impl->tlb_sync(smmu, page, sync, status);

	/* Kick off the sync; the written value is ignored (see QCOM_DUMMY_VAL). */
	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);

	/* Busy-poll the status register, backing off exponentially. */
	while (delay < TLB_LOOP_TIMEOUT) {
		unsigned int spin_cnt = TLB_SPIN_COUNT;

		while (spin_cnt--) {
			u32 reg = arm_smmu_readl(smmu, page, status);

			if (!(reg & ARM_SMMU_sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
		delay *= 2;
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}
staticvoid arm_smmu_tlb_inv_context_s1(void *cookie)
{ struct arm_smmu_domain *smmu_domain = cookie; /* * The TLBI write may be relaxed, so ensure that PTEs cleared by the * current CPU are visible beforehand.
*/
wmb();
arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
arm_smmu_tlb_sync_context(smmu_domain);
}
/* Stage-2 walk invalidation on SMMUv1: invalidate the whole context. */
static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size,
					size_t granule, void *cookie)
{
	arm_smmu_tlb_inv_context_s2(cookie);
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
 * think.
*/ staticvoid arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather, unsignedlong iova, size_t granule, void *cookie)
{ struct arm_smmu_domain *smmu_domain = cookie; struct arm_smmu_device *smmu = smmu_domain->smmu;
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
wmb();
if (__ratelimit(&rs)) { if (IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT) &&
(gfsr & ARM_SMMU_sGFSR_USF))
dev_err(smmu->dev, "Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may have security implications\n",
(u16)gfsynr1); else
dev_err(smmu->dev, "Unexpected global fault, this could be serious\n");
dev_err(smmu->dev, "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
gfsr, gfsynr0, gfsynr1, gfsynr2);
}
/* * Use the weakest shareability/memory types, so they are * overridden by the ttbcr/pte.
*/ if (stage1) {
reg |= FIELD_PREP(ARM_SMMU_CBAR_S1_BPSHCFG,
ARM_SMMU_CBAR_S1_BPSHCFG_NSH) |
FIELD_PREP(ARM_SMMU_CBAR_S1_MEMATTR,
ARM_SMMU_CBAR_S1_MEMATTR_WB);
} elseif (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { /* 8-bit VMIDs live in CBAR */
reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid);
}
arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);
/* * TCR * We must write this before the TTBRs, since it determines the * access behaviour of some fields (in particular, ASID[15:8]).
*/ if (stage1 && smmu->version > ARM_SMMU_V1)
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);
mutex_lock(&smmu_domain->init_mutex); if (smmu_domain->smmu) goto out_unlock;
/* * Mapping the requested stage onto what we support is surprisingly * complicated, mainly because the spec allows S1+S2 SMMUs without * support for nested translation. That means we end up with the * following table: * * Requested Supported Actual * S1 N S1 * S1 S1+S2 S1 * S1 S2 S2 * S1 S1 S1 * N N N * N S1+S2 S2 * N S2 S2 * N S1 S1 * * Note that you can't actually request stage-2 mappings.
*/ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
smmu_domain->stage = ARM_SMMU_DOMAIN_S2; if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
/* * Choosing a suitable context format is even more fiddly. Until we * grow some way for the caller to express a preference, and/or move * the decision into the io-pgtable code where it arguably belongs, * just aim for the closest thing to the rest of the system, and hope * that the hardware isn't esoteric enough that we can't assume AArch64 * support to be a superset of AArch32 support...
*/ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L; if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
!IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
(smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
(smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S; if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
(smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
ARM_SMMU_FEAT_FMT_AARCH64_16K |
ARM_SMMU_FEAT_FMT_AARCH64_4K)))
cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
ret = -EINVAL; goto out_unlock;
}
/* Initialise the context bank with our page table cfg */
arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
arm_smmu_write_context_bank(smmu, cfg->cbndx);
/* * Request context fault interrupt. Do this last to avoid the * handler seeing a half-initialised domain state.
*/
irq = smmu->irqs[cfg->irptndx];
ret = arm_smmu_rpm_get(smmu); if (ret < 0) return;
/* * Disable the context bank and free the page tables before freeing * it.
*/
smmu->cbs[cfg->cbndx].cfg = NULL;
arm_smmu_write_context_bank(smmu, cfg->cbndx);
/* * Allocate the domain and initialise some of its data structures. * We can't really do anything meaningful until we've added a * master.
*/
smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); if (!smmu_domain) return NULL;
/* * Free the domain resources. We assume that all devices have * already been detached.
*/
arm_smmu_destroy_domain_context(smmu_domain);
kfree(smmu_domain);
}
staticvoid arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
arm_smmu_write_s2cr(smmu, idx); if (smmu->smrs)
arm_smmu_write_smr(smmu, idx);
}
/* * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function * should be called after sCR0 is written.
*/ staticvoid arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
u32 smr; int i;
if (!smmu->smrs) return; /* * If we've had to accommodate firmware memory regions, we may * have live SMRs by now; tread carefully... * * Somewhat perversely, not having a free SMR for this test implies we * can get away without it anyway, as we'll only be able to 'allocate' * these SMRs for the ID/mask values we're already trusting to be OK.
*/ for (i = 0; i < smmu->num_mapping_groups; i++) if (!smmu->smrs[i].valid) goto smr_ok; return;
smr_ok: /* * SMR.ID bits may not be preserved if the corresponding MASK * bits are set, so check each one separately. We can reject * masters later if they try to claim IDs outside these masks.
*/
smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask);
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(i), smr);
smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr);
/*
 * Locate a stream-map entry for the given stream ID/mask pair.
 *
 * Returns: the index of an existing entry that entirely covers the pair;
 * otherwise a free index to claim; -ENOSPC when no entry is free; or
 * -EINVAL when the pair partially overlaps an existing entry (some stream
 * ID could then match both, which must not be allowed).
 */
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int free_idx = -ENOSPC;
	int i;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		struct arm_smmu_smr *smr = &smrs[i];

		if (!smr->valid) {
			/* Remember the first free slot as a fallback claim. */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * An existing entry that _entirely_ matches the new one can
		 * be reused outright, and also guarantees there can be no
		 * subsequent conflicting entries. In normal use we'd expect
		 * simply identical entries here, but there's no harm in
		 * accommodating the generalisation.
		 */
		if ((mask & smr->mask) == mask &&
		    !((id ^ smr->id) & ~smr->mask))
			return i;
		/*
		 * Any other overlap means at least one stream ID would
		 * match both entries, and we can't allow that risk.
		 */
		if (!((id ^ smr->id) & ~(smr->mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
staticbool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{ if (--smmu->s2crs[idx].count) returnfalse;
smmu->s2crs[idx] = s2cr_init_val; if (smmu->smrs)
smmu->smrs[idx].valid = false;
/* * FIXME: The arch/arm DMA API code tries to attach devices to its own * domains between of_xlate() and probe_device() - we have no way to cope * with that, so until ARM gets converted to rely on groups and default * domains, just say no (but more politely than by dereferencing NULL). * This should be at least a WARN_ON once that's sorted.
*/
cfg = dev_iommu_priv_get(dev); if (!cfg) return -ENODEV;
smmu = cfg->smmu;
ret = arm_smmu_rpm_get(smmu); if (ret < 0) return ret;
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(smmu_domain, smmu, dev); if (ret < 0) goto rpm_put;
/* * Sanity check the domain. We don't support domains across * different SMMUs.
*/ if (smmu_domain->smmu != smmu) {
ret = -EINVAL; goto rpm_put;
}
/* Looks ok, so add the device to the domain */
arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
smmu_domain->cfg.cbndx, fwspec);
rpm_put:
arm_smmu_rpm_put(smmu); return ret;
}
switch (cap) { case IOMMU_CAP_CACHE_COHERENCY: /* * It's overwhelmingly the case in practice that when the pagetable * walk interface is connected to a coherent interconnect, all the * translation interfaces are too. Furthermore if the device is * natively coherent, then its translation interface must also be.
*/ return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
device_get_dma_attr(dev) == DEV_DMA_COHERENT; case IOMMU_CAP_NOEXEC: case IOMMU_CAP_DEFERRED_FLUSH: returntrue; default: returnfalse;
}
}
if (using_legacy_binding) {
ret = arm_smmu_register_legacy_master(dev, &smmu);
/* * If dev->iommu_fwspec is initally NULL, arm_smmu_register_legacy_master() * will allocate/initialise a new one. Thus we need to update fwspec for * later use.
*/
fwspec = dev_iommu_fwspec_get(dev); if (ret) goto out_free;
} else {
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
}
ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) {
u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
if (sid & ~smmu->streamid_mask) {
dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
sid, smmu->streamid_mask); goto out_free;
} if (mask & ~smmu->smr_mask_mask) {
dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
mask, smmu->smr_mask_mask); goto out_free;
}
}
ret = -ENOMEM;
cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
GFP_KERNEL); if (!cfg) goto out_free;
cfg->smmu = smmu;
dev_iommu_priv_set(dev, cfg); while (i--)
cfg->smendx[i] = INVALID_SMENDX;
ret = arm_smmu_rpm_get(smmu); if (ret < 0) goto out_cfg_free;
ret = arm_smmu_master_alloc_smes(dev);
arm_smmu_rpm_put(smmu);
mutex_lock(&smmu->stream_map_mutex);
for_each_cfg_sme(cfg, fwspec, i, idx) { if (group && smmu->s2crs[idx].group &&
group != smmu->s2crs[idx].group) {
mutex_unlock(&smmu->stream_map_mutex); return ERR_PTR(-EINVAL);
}
group = smmu->s2crs[idx].group;
}
if (group) {
mutex_unlock(&smmu->stream_map_mutex); return iommu_group_ref_get(group);
}
if (dev_is_pci(dev))
group = pci_device_group(dev); elseif (dev_is_fsl_mc(dev))
group = fsl_mc_device_group(dev); else
group = generic_device_group(dev);
/* Remember group for faster lookups */ if (!IS_ERR(group))
for_each_cfg_sme(cfg, fwspec, i, idx)
smmu->s2crs[idx].group = group;
/* * Reset stream mapping groups: Initial values mark all SMRn as * invalid and all S2CRn as bypass unless overridden.
*/ for (i = 0; i < smmu->num_mapping_groups; ++i)
arm_smmu_write_sme(smmu, i);
/* Make sure all context banks are disabled and clear CB_FSR */ for (i = 0; i < smmu->num_context_banks; ++i) {
arm_smmu_write_context_bank(smmu, i);
arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT);
}
/* Invalidate the TLB, just in case */
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLH, QCOM_DUMMY_VAL);
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIALLNSNH, QCOM_DUMMY_VAL);
/* ID0 */
id = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_ID0);
/* Restrict available stages based on module parameter */ if (force_stage == 1)
id &= ~(ARM_SMMU_ID0_S2TS | ARM_SMMU_ID0_NTS); elseif (force_stage == 2)
id &= ~(ARM_SMMU_ID0_S1TS | ARM_SMMU_ID0_NTS);
if (id & ARM_SMMU_ID0_S1TS) {
smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
dev_notice(smmu->dev, "\tstage 1 translation\n");
}
if (id & ARM_SMMU_ID0_S2TS) {
smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
dev_notice(smmu->dev, "\tstage 2 translation\n");
}
if (id & ARM_SMMU_ID0_NTS) {
smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
dev_notice(smmu->dev, "\tnested translation\n");
}
/* * In order for DMA API calls to work properly, we must defer to what * the FW says about coherency, regardless of what the hardware claims. * Fortunately, this also opens up a workaround for systems where the * ID register value has ended up configured incorrectly.
*/
cttw_reg = !!(id & ARM_SMMU_ID0_CTTW); if (cttw_fw || cttw_reg)
dev_notice(smmu->dev, "\t%scoherent table walk\n",
cttw_fw ? "" : "non-"); if (cttw_fw != cttw_reg)
dev_notice(smmu->dev, "\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */ if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) {
smmu->features |= ARM_SMMU_FEAT_EXIDS;
size = 1 << 16;
} else {
size = 1 << FIELD_GET(ARM_SMMU_ID0_NUMSIDB, id);
}
smmu->streamid_mask = size - 1; if (id & ARM_SMMU_ID0_SMS) {
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
size = FIELD_GET(ARM_SMMU_ID0_NUMSMRG, id); if (size == 0) {
dev_err(smmu->dev, "stream-matching supported, but no SMRs present!\n"); return -ENODEV;
}
/* Zero-initialised to mark as invalid */
smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
GFP_KERNEL); if (!smmu->smrs) return -ENOMEM;
dev_notice(smmu->dev, "\tstream matching with %u register groups", size);
} /* s2cr->type == 0 means translation, so initialise explicitly */
smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
GFP_KERNEL); if (!smmu->s2crs) return -ENOMEM; for (i = 0; i < size; i++)
smmu->s2crs[i] = s2cr_init_val;
/* The output mask is also applied for bypass */
size = arm_smmu_id_size_to_bits(FIELD_GET(ARM_SMMU_ID2_OAS, id));
smmu->pa_size = size;
if (id & ARM_SMMU_ID2_VMID16)
smmu->features |= ARM_SMMU_FEAT_VMID16;
/* * What the page table walker can address actually depends on which * descriptor format is in use, but since a) we don't know that yet, * and b) it can vary per context bank, this will have to do...
*/ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
dev_warn(smmu->dev, "failed to set DMA mask for table walker\n");
if (smmu->version < ARM_SMMU_V2) {
smmu->va_size = smmu->ipa_size; if (smmu->version == ARM_SMMU_V1_64K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
} else {
size = FIELD_GET(ARM_SMMU_ID2_UBS, id);
smmu->va_size = arm_smmu_id_size_to_bits(size); if (id & ARM_SMMU_ID2_PTFS_4K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K; if (id & ARM_SMMU_ID2_PTFS_16K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K; if (id & ARM_SMMU_ID2_PTFS_64K)
smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
}
if (smmu->impl && smmu->impl->cfg_probe) {
ret = smmu->impl->cfg_probe(smmu); if (ret) return ret;
}
/* Now we've corralled the various formats, what'll it do? */ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M; if (smmu->features &
(ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
smmu->pgsize_bitmap |= SZ_16K | SZ_32M; if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
/* * Rather than trying to look at existing mappings that * are setup by the firmware and then invalidate the ones * that do no have matching RMR entries, just disable the * SMMU until it gets enabled again in the reset routine.
*/
reg = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sCR0);
reg |= ARM_SMMU_sCR0_CLIENTPD;
arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, reg);
list_for_each_entry(e, &rmr_list, list) { struct iommu_iort_rmr_data *rmr; int i;
rmr = container_of(e, struct iommu_iort_rmr_data, rr); for (i = 0; i < rmr->num_sids; i++) {
idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0); if (idx < 0) continue;
/* * The resource size should effectively match the value of SMMU_TOP; * stash that temporarily until we know PAGESIZE to validate it with.
*/
smmu->numpage = resource_size(res);
smmu = arm_smmu_impl_init(smmu); if (IS_ERR(smmu)) return PTR_ERR(smmu);
num_irqs = platform_irq_count(pdev);
smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs; if (smmu->num_context_irqs <= 0) return dev_err_probe(dev, -ENODEV, "found %d interrupts but expected at least %d\n",
num_irqs, global_irqs + pmu_irqs + 1);
smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs, sizeof(*smmu->irqs), GFP_KERNEL); if (!smmu->irqs) return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
smmu->num_context_irqs);
for (i = 0; i < smmu->num_context_irqs; i++) { int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);
if (irq < 0) return irq;
smmu->irqs[i] = irq;
}
err = devm_clk_bulk_get_all(dev, &smmu->clks); if (err < 0) {
dev_err(dev, "failed to get clocks %d\n", err); return err;
}
smmu->num_clks = err;
err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks); if (err) return err;
err = arm_smmu_device_cfg_probe(smmu); if (err) return err;
if (smmu->version == ARM_SMMU_V2) { if (smmu->num_context_banks > smmu->num_context_irqs) {
dev_err(dev, "found only %d context irq(s) but %d required\n",
smmu->num_context_irqs, smmu->num_context_banks); return -ENODEV;
}
for (i = 0; i < global_irqs; i++) { int irq = platform_get_irq(pdev, i);
if (irq < 0) return irq;
err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED, "arm-smmu global fault", smmu); if (err) return dev_err_probe(dev, err, "failed to request global IRQ %d (%u)\n",
i, irq);
}
platform_set_drvdata(pdev, smmu);
/* Check for RMRs and install bypass SMRs if any */
arm_smmu_rmr_install_bypass_smr(smmu);
/* * We want to avoid touching dev->power.lock in fastpaths unless * it's really going to do something useful - pm_runtime_enabled() * can serve as an ideal proxy for that decision. So, conditionally * enable pm_runtime.
*/ if (dev->pm_domain) {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
arm_smmu_rpm_use_autosuspend(smmu);
}
/*
 * NOTE(review): extraction residue — a German website disclaimer that is not
 * part of the driver source. Preserved (translated) and commented out so it
 * no longer breaks compilation:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the coloured syntax rendering and the measurement are still
 * experimental."
 */