/*
 * NOTE(review): the lines below look like a garbled extraction of two
 * platform-data structures plus trailing fields of the iommu data struct --
 * the struct tags/field names introducing them are missing and several
 * keywords are fused (e.g. "unsignedint", "conststruct"). Code is left
 * byte-identical; verify against the original file.
 *
 * The IOMMU HW may support 16GB iova. In order to balance the IOVA ranges,
 * different masters will be put in different iova ranges, for example vcodec
 * is in 4G-8G and cam is in 8G-12G. Meanwhile, some masters may have the
 * special IOVA range requirement, like CCU can only support the address
 * 0x40000000-0x44000000.
 * Here list the iova ranges this SoC supports and which larbs/ports are in
 * which region.
 *
 * 16GB iova all use one pgtable, but each region is an iommu group.
 */
struct { unsignedint iova_region_nr; conststruct mtk_iommu_iova_region *iova_region;
/*
 * Indicate the correspondence between larbs, ports and regions.
 *
 * The index is the same as iova_region and larb port numbers are
 * described as bit positions.
 * For example, storing BIT(0) at index 2,1 means "larb 1, port0 is in region 2".
 * [2] = { [1] = BIT(0) }
 */
const u32 (*iova_region_larb_msk)[MTK_LARB_NR_MAX];
};
/*
 * The IOMMU HW may have 5 banks. Each bank has an independent pgtable.
 * Here list how many banks this SoC supports/enables and which ports are in
 * which bank.
 */
struct {
u8 banks_num; bool banks_enable[MTK_IOMMU_BANK_MAX]; unsignedint banks_portmsk[MTK_IOMMU_BANK_MAX];
};
/*
 * In the sharing pgtable case, link data->list onto the global list like
 * m4ulist. In the non-sharing pgtable case, link data->list onto its own
 * hw_list_head.
 */
struct list_head *hw_list; struct list_head hw_list_head; struct list_head list; struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX];
};
/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0 1G 2G 3G 4G 5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * =============================
 *
 * 4G 5G 6G 7G 8G
 * |---E---|---B---|---C---|---D---|
 * +------------Memory-------------+
 *
 * The Region 'A'(I/O) can NOT be mapped by M4U; For Region 'B'/'C'/'D', the
 * bit32 of the CPU physical address always is needed to set, and for Region
 * 'E', the CPU physical address keeps as is.
 * Additionally, the iommu consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x140000000UL
static LIST_HEAD(m4ulist);	/* List all the M4U HWs */
/* If 2 M4U share a domain(use the same hwlist), Put the corresponding info in first data.*/ staticstruct mtk_iommu_data *mtk_iommu_get_frst_data(struct list_head *hwlist)
{ return list_first_entry(hwlist, struct mtk_iommu_data, list);
}
/*
 * Invalidate the whole TLB of one IOMMU HW.
 *
 * The flush-all command is always issued through bank0, which holds the
 * global control registers. The register writes are relaxed; the final
 * wmb() makes sure the invalidate command has been posted before the
 * spinlock is released.
 *
 * Fix: the extracted text had the fused tokens "staticvoid" and
 * "unsignedlong", which are not valid C; restore the spaces. The register
 * write sequence is unchanged.
 */
static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
	/* Tlb flush all always is in bank0. */
	struct mtk_iommu_bank_data *bank = &data->bank[0];
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->tlb_lock, flags);
	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
		       base + data->plat_data->inv_sel_reg);
	writel_relaxed(F_ALL_INVLD, base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
	spin_unlock_irqrestore(&bank->tlb_lock, flags);
}
/*
 * NOTE(review): fragment of the TLB range-invalidate path; the enclosing
 * function's signature is not visible in this chunk. Code lines are kept
 * byte-identical to the extraction.
 */
for_each_m4u(data, head) {
/*
 * To avoid resuming the iommu device frequently when the iommu device
 * is not active, it doesn't always call pm_runtime_get here; the tlb
 * flush then depends on the tlb-flush-all in the runtime resume.
 *
 * There are 2 special cases:
 *
 * Case1: The iommu dev doesn't have power domain but has bclk. This case
 * should also avoid the tlb flush while the dev is not active to mute
 * the tlb timeout log, like mt8173.
 *
 * Case2: The power/clock of infra iommu is always on, and it doesn't
 * have the device link with the master devices. This case should avoid
 * the PM status check.
 */
check_pm_status = !MTK_IOMMU_HAS_FLAG(data->plat_data, PM_CLK_AO);
if (check_pm_status) { if (pm_runtime_get_if_in_use(data->dev) <= 0) continue;
}
/* The range-invalidate registers live in the bank that owns this flush. */
curbank = &data->bank[bank->id];
base = curbank->base;
spin_lock_irqsave(&curbank->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
base + data->plat_data->inv_sel_reg);
/* Program the inclusive [iova, iova + size - 1] range, then kick it off. */
writel_relaxed(MTK_IOMMU_TLB_ADDR(iova), base + REG_MMU_INVLD_START_A);
writel_relaxed(MTK_IOMMU_TLB_ADDR(iova + size - 1),
base + REG_MMU_INVLD_END_A);
writel_relaxed(F_MMU_INV_RANGE, base + REG_MMU_INVALIDATE);
/*
 * NOTE(review): fragment of the master-config path; the enclosing
 * function's signature is not visible in this chunk and some keywords are
 * fused by extraction (e.g. "elseif"). Code lines kept byte-identical.
 */
/* Collect all of this master's ports into one bitmask. */
for (i = 0; i < fwspec->num_ids; ++i) {
portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
portid_msk |= BIT(portid);
}
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
/* All ports should be in the same larb. just use 0 here */
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]);
larb_mmu = &data->larb_imu[larbid];
region = data->plat_data->iova_region + regionid;
dev_dbg(dev, "%s iommu for larb(%s) port 0x%lx region %d rgn-bank %d.\n",
str_enable_disable(enable), dev_name(larb_mmu->dev),
portid_msk, regionid, upper_32_bits(region->iova_base));
if (enable)
larb_mmu->mmu |= portid_msk; else
larb_mmu->mmu &= ~portid_msk;
} elseif (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA)) { if (MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
/* Infra masters behind ATF: configure via secure monitor call. */
arm_smccc_smc(MTK_SIP_KERNEL_IOMMU_CONTROL,
IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU,
portid_msk, enable, 0, 0, 0, 0, &res);
ret = res.a0;
} else {
/* PCI dev has only one output id, enable the next writing bit for PCIe */
if (dev_is_pci(dev)) { if (fwspec->num_ids != 1) {
dev_err(dev, "PCI dev can only have one port.\n"); return -ENODEV;
}
portid_msk |= BIT(portid + 1);
}
/*
 * NOTE(review): garbled fragment mixing the domain-finalise sharing path,
 * the attach_dev path and the start of iova_to_phys; the enclosing function
 * signatures are missing from this chunk. Code lines kept byte-identical.
 */
/* Share pgtable when 2 MM IOMMU share the pgtable or one IOMMU use multiple iova ranges */
if (share_dom) {
dom->iop = share_dom->iop;
dom->cfg = share_dom->cfg;
dom->domain.pgsize_bitmap = share_dom->domain.pgsize_bitmap; goto update_iova_region;
}
region_id = mtk_iommu_get_iova_region_id(dev, data->plat_data); if (region_id < 0) return region_id;
bankid = mtk_iommu_get_bank_id(dev, data->plat_data);
mutex_lock(&dom->mutex); if (!dom->bank) {
/* Data is in the frstdata in the sharing pgtable case. */
frstdata = mtk_iommu_get_frst_data(hw_list);
mutex_lock(&frstdata->mutex);
ret = mtk_iommu_domain_finalise(dom, frstdata, region_id);
mutex_unlock(&frstdata->mutex); if (ret) {
mutex_unlock(&dom->mutex); return ret;
}
dom->bank = &data->bank[bankid];
}
mutex_unlock(&dom->mutex);
mutex_lock(&data->mutex);
bank = &data->bank[bankid]; if (!bank->m4u_dom) {
/* Initialize the M4U HW for each BANK on its first attach. */
ret = pm_runtime_resume_and_get(m4udev); if (ret < 0) {
dev_err(m4udev, "pm get fail(%d) in attach.\n", ret); goto err_unlock;
}
ret = mtk_iommu_hw_init(data, bankid); if (ret) {
pm_runtime_put(m4udev); goto err_unlock;
}
bank->m4u_dom = dom;
writel(dom->cfg.arm_v7s_cfg.ttbr, bank->base + REG_MMU_PT_BASE_ADDR);
/* region_id > 0: iova may lie above 4GB, so widen the DMA mask to 34 bits. */
if (region_id > 0) {
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(34)); if (ret) {
dev_err(m4udev, "Failed to set dma_mask for %s(%d).\n", dev_name(dev), ret); return ret;
}
}
/* iova_to_phys: strip bit32 again when the legacy 4GB remap mode is on. */
pa = dom->iop->iova_to_phys(dom->iop, iova); if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
dom->bank->parent_data->enable_4GB &&
pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
pa &= ~BIT_ULL(32);
/*
 * NOTE(review): tail fragment of the probe_device path; the function's
 * opening is not visible in this chunk. Code lines kept byte-identical.
 */
if (!MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) return &data->iommu;
/*
 * Link the consumer device with the smi-larb device (supplier).
 * The device that connects with each larb is an independent HW.
 * All the ports in each device should be in the same larb.
 */
larbid = MTK_M4U_TO_LARB(fwspec->ids[0]); if (larbid >= MTK_LARB_NR_MAX) return ERR_PTR(-EINVAL);
for (i = 1; i < fwspec->num_ids; i++) {
larbidx = MTK_M4U_TO_LARB(fwspec->ids[i]); if (larbid != larbidx) {
dev_err(dev, "Can only use one larb. Fail@larb%d-%d.\n",
larbid, larbidx); return ERR_PTR(-EINVAL);
}
}
larbdev = data->larb_imu[larbid].dev; if (!larbdev) return ERR_PTR(-EINVAL);
/* Runtime-PM link; a failed link is reported but not treated as fatal. */
link = device_link_add(dev, larbdev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS); if (!link)
dev_err(dev, "Unable to link %s\n", dev_name(larbdev)); return &data->iommu;
}
/*
 * NOTE(review): fragment mixing the iommu-group selection and the
 * reserved-region enumeration; the enclosing function signatures are
 * missing from this chunk. Code lines kept byte-identical.
 */
/*
 * If the bank function is enabled, each bank is an iommu group/domain.
 * Otherwise, each iova region is an iommu group/domain.
 */
bankid = mtk_iommu_get_bank_id(dev, plat_data); if (bankid) return bankid;
if ((int)regionid < 0) return;
curdom = data->plat_data->iova_region + regionid; for (i = 0; i < data->plat_data->iova_region_nr; i++) {
resv = data->plat_data->iova_region + i;
/* Only reserve when the region is inside the current domain */
if (resv->iova_base <= curdom->iova_base ||
resv->iova_base + resv->size >= curdom->iova_base + curdom->size) continue;
region = iommu_alloc_resv_region(resv->iova_base, resv->size,
prot, IOMMU_RESV_RESERVED,
GFP_KERNEL); if (!region) return;
/*
 * NOTE(review): fragment of the HW-init path (global bank0 settings plus
 * the per-bank interrupt enables); the enclosing function's signature is
 * not visible in this chunk. Code lines kept byte-identical.
 */
/*
 * Global control settings are in bank0. These global registers may be
 * re-initialised here since it is not known whether bank0 has consumers.
 */
if (MTK_IOMMU_HAS_FLAG(data->plat_data, TF_PORT_TO_ADDR_MT8173)) {
regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
} else {
regval = readl_relaxed(bank0->base + REG_MMU_CTRL_REG);
regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR;
}
writel_relaxed(regval, bank0->base + REG_MMU_CTRL_REG);
if (data->enable_4GB &&
MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
/*
 * If 4GB mode is enabled, the valid PA range is from
 * 0x1_0000_0000 to 0x1_ffff_ffff. Here record bit[32:30].
 */
regval = F_MMU_VLD_PA_RNG(7, 4);
writel_relaxed(regval, bank0->base + REG_MMU_VLD_PA_RNG);
} if (MTK_IOMMU_HAS_FLAG(data->plat_data, DCM_DISABLE))
writel_relaxed(F_MMU_DCM, bank0->base + REG_MMU_DCM_DIS); else
writel_relaxed(0, bank0->base + REG_MMU_DCM_DIS);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
/* The register is called STANDARD_AXI_MODE in this case */
regval = 0;
} else {
regval = readl_relaxed(bank0->base + REG_MMU_MISC_CTRL); if (!MTK_IOMMU_HAS_FLAG(data->plat_data, STD_AXI_MODE))
regval &= ~F_MMU_STANDARD_AXI_MODE_MASK; if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
}
writel_relaxed(regval, bank0->base + REG_MMU_MISC_CTRL);
/* Independent settings for each bank */
regval = F_L2_MULIT_HIT_EN |
F_TABLE_WALK_FAULT_INT_EN |
F_PREETCH_FIFO_OVERFLOW_INT_EN |
F_MISS_FIFO_OVERFLOW_INT_EN |
F_PREFETCH_FIFO_ERR_INT_EN |
F_MISS_FIFO_ERR_INT_EN;
writel_relaxed(regval, bankx->base + REG_MMU_INT_CONTROL0);
/*
 * NOTE(review): fragment of the MM dts-parse loop over "mediatek,larbs";
 * the enclosing function's signature is not visible and some keywords are
 * fused by extraction (e.g. "elseif"). Also note: larbnode is passed to
 * of_parse_phandle() below AFTER of_node_put(larbnode) above -- looks like
 * a device-node refcount hazard; verify against the original source.
 */
for (i = 0; i < larb_nr; i++) { struct device_node *smicomm_node, *smi_subcomm_node;
u32 id;
larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i); if (!larbnode) {
ret = -EINVAL; goto err_larbdev_put;
}
/* Disabled larbs are simply skipped, not an error. */
if (!of_device_is_available(larbnode)) {
of_node_put(larbnode); continue;
}
/* The id is consecutive if there is no "mediatek,larb-id" property. */
ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id); if (ret)
id = i; if (id >= MTK_LARB_NR_MAX) {
of_node_put(larbnode);
ret = -EINVAL; goto err_larbdev_put;
}
plarbdev = of_find_device_by_node(larbnode);
of_node_put(larbnode); if (!plarbdev) {
ret = -ENODEV; goto err_larbdev_put;
} if (data->larb_imu[id].dev) {
platform_device_put(plarbdev);
ret = -EEXIST; goto err_larbdev_put;
}
data->larb_imu[id].dev = &plarbdev->dev;
/* The larb driver must already be bound, otherwise defer probing. */
if (!plarbdev->dev.driver) {
ret = -EPROBE_DEFER; goto err_larbdev_put;
}
/* Get smi-(sub)-common dev from the last larb. */
smi_subcomm_node = of_parse_phandle(larbnode, "mediatek,smi", 0); if (!smi_subcomm_node) {
ret = -EINVAL; goto err_larbdev_put;
}
/*
 * There may be two levels of smi-common. The node is smi-sub-common if it
 * has a further "mediatek,smi" property, otherwise it is smi-common.
 */
smicomm_node = of_parse_phandle(smi_subcomm_node, "mediatek,smi", 0); if (smicomm_node)
of_node_put(smi_subcomm_node); else
smicomm_node = smi_subcomm_node;
/*
 * All the larbs that connect to one IOMMU must connect with the same
 * smi-common.
 */
if (!frst_avail_smicomm_node) {
frst_avail_smicomm_node = smicomm_node;
} elseif (frst_avail_smicomm_node != smicomm_node) {
dev_err(dev, "mediatek,smi property is not right @larb%d.", id);
of_node_put(smicomm_node);
ret = -EINVAL; goto err_larbdev_put;
} else {
of_node_put(smicomm_node);
}
/*
 * NOTE(review): fragment covering probe-time setup and (apparently) part
 * of the remove path; the enclosing function signatures are missing from
 * this chunk and some keywords are fused by extraction (e.g. "elseif").
 * Code lines kept byte-identical.
 */
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM;
data->dev = dev;
data->plat_data = of_device_get_match_data(dev);
/* Protect memory. HW will access here while translation fault.*/
protect = devm_kcalloc(dev, 2, MTK_PROTECT_PA_ALIGN, GFP_KERNEL); if (!protect) return -ENOMEM;
data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) {
infracfg = syscon_regmap_lookup_by_phandle(dev->of_node, "mediatek,infracfg"); if (IS_ERR(infracfg)) {
/*
 * Legacy devicetrees will not specify a phandle to
 * mediatek,infracfg: in that case, we use the older
 * way to retrieve a syscon to infra.
 *
 * This is for retrocompatibility purposes only, hence
 * no more compatibles shall be added to this.
 */
switch (data->plat_data->m4u_plat) { case M4U_MT2712:
p = "mediatek,mt2712-infracfg"; break; case M4U_MT8173:
p = "mediatek,mt8173-infracfg"; break; default:
p = NULL;
}
infracfg = syscon_regmap_lookup_by_compatible(p); if (IS_ERR(infracfg)) return PTR_ERR(infracfg);
}
ret = regmap_read(infracfg, REG_INFRA_MISC, &val); if (ret) return ret;
data->enable_4GB = !!(val & F_DDR_4GB_SUPPORT_EN);
}
banks_num = data->plat_data->banks_num;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -EINVAL; if (resource_size(res) < banks_num * MTK_IOMMU_BANK_SZ) {
dev_err(dev, "banknr %d. res %pR is not enough.\n", banks_num, res); return -EINVAL;
}
base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base);
ioaddr = res->start;
/* Carve each enabled bank's MMIO window out of the single resource. */
do { if (!data->plat_data->banks_enable[i]) continue;
bank = &data->bank[i];
bank->id = i;
bank->base = base + i * MTK_IOMMU_BANK_SZ;
bank->m4u_dom = NULL;
if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
data->bclk = devm_clk_get(dev, "bclk"); if (IS_ERR(data->bclk)) return PTR_ERR(data->bclk);
}
/* 35-bit pgtable PA support needs a matching DMA mask. */
if (MTK_IOMMU_HAS_FLAG(data->plat_data, PGTABLE_PA_35_EN)) {
ret = dma_set_mask(dev, DMA_BIT_MASK(35)); if (ret) {
dev_err(dev, "Failed to set dma_mask 35.\n"); return ret;
}
}
pm_runtime_enable(dev);
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
ret = mtk_iommu_mm_dts_parse(dev, &match, data); if (ret) {
dev_err_probe(dev, ret, "mm dts parse fail\n"); goto out_runtime_disable;
}
} elseif (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_INFRA) &&
!MTK_IOMMU_HAS_FLAG(data->plat_data, CFG_IFA_MASTER_IN_ATF)) {
p = data->plat_data->pericfg_comp_str;
data->pericfg = syscon_regmap_lookup_by_compatible(p); if (IS_ERR(data->pericfg)) {
ret = PTR_ERR(data->pericfg); goto out_runtime_disable;
}
}
/* NOTE(review): the lines below look like they belong to the remove path. */
if (MTK_IOMMU_IS_TYPE(data->plat_data, MTK_IOMMU_TYPE_MM)) {
device_link_remove(data->smicomm_dev, &pdev->dev);
component_master_del(&pdev->dev, &mtk_iommu_com_ops);
}
pm_runtime_disable(&pdev->dev); for (i = 0; i < data->plat_data->banks_num; i++) {
bank = &data->bank[i]; if (!bank->m4u_dom) continue;
devm_free_irq(&pdev->dev, bank->irq, bank);
}
}
/*
 * NOTE(review): fragment of the runtime-resume register restore; the
 * enclosing function's signature is not visible in this chunk. Code lines
 * kept byte-identical.
 */
ret = clk_prepare_enable(data->bclk); if (ret) {
dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret); return ret;
}
/*
 * Upon first resume, only enable the clk and return, since the values of the
 * registers are not yet set.
 */
if (!reg->wr_len_ctrl) return 0;
base = data->bank[i].base;
/* Restore the saved global registers first. */
writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL);
writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG); do {
/* Restore each enabled bank that already has a domain attached. */
m4u_dom = data->bank[i].m4u_dom; if (!data->plat_data->banks_enable[i] || !m4u_dom) continue;
base = data->bank[i].base;
writel_relaxed(reg->int_control[i], base + REG_MMU_INT_CONTROL0);
writel_relaxed(reg->int_main_control[i], base + REG_MMU_INT_MAIN_CONTROL);
writel_relaxed(reg->ivrp_paddr[i], base + REG_MMU_IVRP_PADDR);
writel(m4u_dom->cfg.arm_v7s_cfg.ttbr, base + REG_MMU_PT_BASE_ADDR);
} while (++i < data->plat_data->banks_num);
/*
 * Users may allocate dma buffer before they call pm_runtime_get,
 * in which case it will lack the necessary tlb flush.
 * Thus, make sure to update the tlb after each PM resume.
 */
mtk_iommu_tlb_flush_all(data); return 0;
}
/* Module metadata. */
MODULE_DESCRIPTION("IOMMU API for MediaTek M4U implementations");
MODULE_LICENSE("GPL v2");
Messung V0.5
- Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert. (Messwert: 0.17) Bemerkung:
-
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.