target->data[0] = cpu_to_le64(STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG,
STRTAB_STE_0_CFG_NESTED));
target->data[0] |= nested_domain->ste[0] &
~cpu_to_le64(STRTAB_STE_0_CFG);
target->data[1] |= nested_domain->ste[1]; /* Merge events for DoS mitigations on eventq */
target->data[1] |= cpu_to_le64(STRTAB_STE_1_MEV);
}
/*
 * Create a physical STE from the virtual STE that userspace provided when
 * it created the nested domain. Using the vSTE userspace can request:
 *  - Non-valid STE
 *  - Abort STE
 *  - Bypass STE (install the S2, no CD table)
 *  - CD table STE (install the S2 and the userspace CD table)
 */
static void arm_smmu_make_nested_domain_ste(
	struct arm_smmu_ste *target, struct arm_smmu_master *master,
	struct arm_smmu_nested_domain *nested_domain, bool ats_enabled)
{
	unsigned int cfg =
		FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(nested_domain->ste[0]));

	/*
	 * Userspace can request a non-valid STE through the nesting interface.
	 * We relay that into an abort physical STE with the intention that
	 * C_BAD_STE for this SID can be generated to userspace.
	 */
	if (!(nested_domain->ste[0] & cpu_to_le64(STRTAB_STE_0_V)))
		cfg = STRTAB_STE_0_CFG_ABORT;

	switch (cfg) {
	case STRTAB_STE_0_CFG_S1_TRANS:
		/* Install the S2 and the userspace-owned CD table. */
		arm_smmu_make_nested_cd_table_ste(target, master, nested_domain,
						  ats_enabled);
		break;
	case STRTAB_STE_0_CFG_BYPASS:
		/* S1 bypass: install only the S2 of the parent domain. */
		arm_smmu_make_s2_domain_ste(target, master,
					    nested_domain->vsmmu->s2_parent,
					    ats_enabled);
		break;
	case STRTAB_STE_0_CFG_ABORT:
	default:
		/* Anything unrecognized is relayed as an abort STE. */
		arm_smmu_make_abort_ste(target);
		break;
	}
}
/*
 * NOTE(review): this span appears to be a corrupted extraction. It opens as
 * arm_smmu_attach_prepare_vmaster(), which takes "state" as a pointer, but
 * from the "if (nested_domain->vsmmu->smmu != master->smmu)" line onward it
 * references identifiers that are not declared anywhere in this function
 * (master, domain, arg, eats, and "state." as a struct value), and no
 * closing brace for the function is visible. The tail looks like it was
 * spliced in from a different attach-path function. Code is kept
 * byte-identical; recover the original from upstream before building.
 */
int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state, struct arm_smmu_nested_domain *nested_domain)
{ struct arm_smmu_vmaster *vmaster; unsignedlong vsid; int ret;
/* Caller must hold the group mutex for this device. */
iommu_group_mutex_assert(state->master->dev);
/* Look up the vSID the VMM assigned to this device on the vIOMMU. */
ret = iommufd_viommu_get_vdev_id(&nested_domain->vsmmu->core,
state->master->dev, &vsid); if (ret) return ret;
/* NOTE(review): "master" is undeclared here — fragment from another function? */
if (nested_domain->vsmmu->smmu != master->smmu) return -EINVAL; if (arm_smmu_ssids_in_use(&master->cd_table)) return -EBUSY;
mutex_lock(&arm_smmu_asid_lock); /* * The VM has to control the actual ATS state at the PCI device because * we forward the invalidations directly from the VM. If the VM doesn't * think ATS is on it will not generate ATC flushes and the ATC will * become incoherent. Since we can't access the actual virtual PCI ATS * config bit here base this off the EATS value in the STE. If the EATS * is set then the VM must generate ATC flushes.
*/
/* NOTE(review): "state." (value access) conflicts with "state->" above. */
state.disable_ats = !nested_domain->enable_ats;
/* NOTE(review): "domain" is undeclared in this function. */
ret = arm_smmu_attach_prepare(&state, domain); if (ret) {
mutex_unlock(&arm_smmu_asid_lock); return ret;
}
/* * Only Full ATS or ATS UR is supported * The EATS field will be set by arm_smmu_make_nested_domain_ste()
*/
/* NOTE(review): "eats" and "arg" are undeclared in this function. */
eats = FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(arg->ste[1]));
arg->ste[1] &= ~cpu_to_le64(STRTAB_STE_1_EATS); if (eats != STRTAB_STE_1_EATS_ABT && eats != STRTAB_STE_1_EATS_TRANS) return -EIO;
/*
 * Translate a virtual SID (as seen by the VM) into the physical SID of the
 * device that was registered under it on this vIOMMU.
 *
 * Returns 0 on success with *sid (if non-NULL) set to the physical SID, or
 * -EIO when no device is registered under @vsid.
 */
static int arm_vsmmu_vsid_to_sid(struct arm_vsmmu *vsmmu, u32 vsid, u32 *sid)
{
	struct arm_smmu_master *master;
	struct device *dev;
	int ret = 0;

	/* Hold the vdevs xarray lock across the lookup and dereference. */
	xa_lock(&vsmmu->core.vdevs);
	dev = iommufd_viommu_find_dev(&vsmmu->core, (unsigned long)vsid);
	if (!dev) {
		ret = -EIO;
		goto unlock;
	}
	master = dev_iommu_priv_get(dev);

	/* At this moment, iommufd only supports PCI device that has one SID */
	if (sid)
		*sid = master->streams[0].id;
unlock:
	xa_unlock(&vsmmu->core.vdevs);
	return ret;
}
/*
 * Overlay of the userspace invalidation record with a pair of u64 words, so
 * a command can be converted in place to the internal representation. This
 * is basically iommu_viommu_arm_smmuv3_invalidate in u64 form.
 */
struct arm_vsmmu_invalidation_cmd {
	union {
		u64 cmd[2];
		struct iommu_viommu_arm_smmuv3_invalidate ucmd;
	};
};
/*
 * NOTE(review): this span is a corrupted extraction. It opens as
 * arm_vsmmu_convert_user_cmd() but, partway through the CFGI_CD case, the
 * body switches to a batch-allocate/submit loop that references identifiers
 * never declared in this function (cmds, cur, end, last, array, smmu) —
 * apparently spliced in from the caller that copies the user command array
 * and issues it to the command queue. The switch cases are also left
 * unterminated. Code is kept byte-identical; recover the original from
 * upstream before building.
 */
/* * Convert, in place, the raw invalidation command into an internal format that * can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are * stored in CPU endian. * * Enforce the VMID or SID on the command.
*/ staticint arm_vsmmu_convert_user_cmd(struct arm_vsmmu *vsmmu, struct arm_vsmmu_invalidation_cmd *cmd)
{ /* Commands are le64 stored in u64 */
cmd->cmd[0] = le64_to_cpu(cmd->ucmd.cmd[0]);
cmd->cmd[1] = le64_to_cpu(cmd->ucmd.cmd[1]);
/* Dispatch on the opcode field of the first command word. */
switch (cmd->cmd[0] & CMDQ_0_OP) { case CMDQ_OP_TLBI_NSNH_ALL: /* Convert to NH_ALL */
cmd->cmd[0] = CMDQ_OP_TLBI_NH_ALL |
FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid);
cmd->cmd[1] = 0; break; case CMDQ_OP_TLBI_NH_VA: case CMDQ_OP_TLBI_NH_VAA: case CMDQ_OP_TLBI_NH_ALL: case CMDQ_OP_TLBI_NH_ASID:
/* Force this vIOMMU's VMID onto the TLBI regardless of what the VM set. */
cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vsmmu->vmid); break; case CMDQ_OP_ATC_INV: case CMDQ_OP_CFGI_CD: case CMDQ_OP_CFGI_CD_ALL: {
u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);
/*
 * NOTE(review): the remainder of this span belongs to a different
 * function — cmds/cur/end/last/array/smmu are all undeclared here, and
 * "sid" above is set nowhere before the case is abandoned.
 */
cmds = kcalloc(array->entry_num, sizeof(*cmds), GFP_KERNEL); if (!cmds) return -ENOMEM;
cur = cmds;
end = cmds + array->entry_num;
static_assert(sizeof(*cmds) == 2 * sizeof(u64));
ret = iommu_copy_struct_from_full_user_array(
cmds, sizeof(*cmds), array,
IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3); if (ret) goto out;
last = cmds; while (cur != end) {
ret = arm_vsmmu_convert_user_cmd(vsmmu, cur); if (ret) goto out;
/* FIXME work in blocks of CMDQ_BATCH_ENTRIES and copy each block? */
cur++; if (cur != end && (cur - last) != CMDQ_BATCH_ENTRIES - 1) continue;
/* FIXME always uses the main cmdq rather than trying to group by type */
ret = arm_smmu_cmdq_issue_cmdlist(smmu, &smmu->cmdq, last->cmd,
cur - last, true); if (ret) {
cur--; goto out;
}
last = cur;
}
out:
/* Report back to userspace how many commands were actually consumed. */
array->entry_num = cur - cmds;
kfree(cmds); return ret;
}
if (!(smmu->features & ARM_SMMU_FEAT_NESTING)) return 0;
/* * FORCE_SYNC is not set with FEAT_NESTING. Some study of the exact HW * defect is needed to determine if arm_vsmmu_cache_invalidate() needs * any change to remove this.
*/ if (WARN_ON(smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) return 0;
/* * Must support some way to prevent the VM from bypassing the cache * because VFIO currently does not do any cache maintenance. canwbs * indicates the device is fully coherent and no cache maintenance is * ever required, even for PCI No-Snoop. S2FWB means the S1 can't make * things non-coherent using the memattr, but No-Snoop behavior is not * effected.
*/ if (!arm_smmu_master_canwbs(master) &&
!(smmu->features & ARM_SMMU_FEAT_S2FWB)) return 0;
if (viommu_type == IOMMU_VIOMMU_TYPE_ARM_SMMUV3) return VIOMMU_STRUCT_SIZE(struct arm_vsmmu, core);
if (!smmu->impl_ops || !smmu->impl_ops->get_viommu_size) return 0; return smmu->impl_ops->get_viommu_size(viommu_type);
}
/*
 * NOTE(review): trailing boilerplate from the extraction source (a German
 * website disclaimer), not part of the driver. Wrapped in a comment so the
 * file remains valid C; original text preserved below.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereitgestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */