/**
 * mtk_pcie_config_tlp_header() - Configure a configuration TLP header
 * @bus: PCI bus to query
 * @devfn: device/function number
 * @where: offset in config space
 * @size: data size in TLP header
 *
 * Set byte enable field and device information in configuration TLP header.
*/ staticvoid mtk_pcie_config_tlp_header(struct pci_bus *bus, unsignedint devfn, int where, int size)
{ struct mtk_gen3_pcie *pcie = bus->sysdata; int bytes;
u32 val;
if (remaining)
dev_warn(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
(unsignedlonglong)cpu_addr, PCIE_MAX_TRANS_TABLES);
return 0;
}
/*
 * mtk_pcie_enable_msi() - Program the MSI capture addresses and enable MSI
 * @pcie: PCIe controller private data
 *
 * For each of the PCIE_MSI_SET_NUM MSI sets, record the set's MMIO base and
 * its bus-visible message address, write that address into the hardware
 * capture registers (low 32 bits at the set base, high 32 bits in the
 * dedicated ADDR_HI bank), then enable all MSI sets and finally the
 * controller-level MSI interrupt.
 *
 * NOTE(review): the register write order (addresses first, enables last)
 * looks intentional — presumably the capture address must be valid before
 * the set is enabled; keep the sequence as-is.
 */
static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
	int i;
	u32 val;

	for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
		struct mtk_msi_set *msi_set = &pcie->msi_sets[i];

		/* Cached MMIO base of this MSI set's registers */
		msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
				i * PCIE_MSI_SET_OFFSET;
		/* Physical (bus) address endpoints write MSI messages to */
		msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
				    i * PCIE_MSI_SET_OFFSET;

		/* Configure the MSI capture address */
		writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
		writel_relaxed(upper_32_bits(msi_set->msg_addr),
			       pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
			       i * PCIE_MSI_SET_ADDR_HI_OFFSET);
	}

	/* Enable every MSI set */
	val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
	val |= PCIE_MSI_SET_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);

	/* Enable MSI delivery at the controller interrupt level */
	val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
	val |= PCIE_MSI_ENABLE;
	writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}
/* Set as RC mode and set controller PCIe Gen speed restriction, if any */
val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
val |= PCIE_RC_MODE; if (pcie->max_link_speed) {
val &= ~PCIE_SETTING_GEN_SUPPORT;
/* Can enable link speed support only from Gen2 onwards */ if (pcie->max_link_speed >= 2)
val |= FIELD_PREP(PCIE_SETTING_GEN_SUPPORT,
GENMASK(pcie->max_link_speed - 2, 0));
} if (pcie->num_lanes) {
val &= ~PCIE_SETTING_LINK_WIDTH;
/* Zero means one lane, each bit activates x2/x4/x8/x16 */ if (pcie->num_lanes > 1)
val |= FIELD_PREP(PCIE_SETTING_LINK_WIDTH,
GENMASK(fls(pcie->num_lanes >> 2), 0));
}
writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
/* Set Link Control 2 (LNKCTL2) speed restriction, if any */ if (pcie->max_link_speed) {
val = readl_relaxed(pcie->base + PCIE_CONF_LINK2_CTL_STS);
val &= ~PCIE_CONF_LINK2_LCR2_LINK_SPEED;
val |= FIELD_PREP(PCIE_CONF_LINK2_LCR2_LINK_SPEED, pcie->max_link_speed);
writel_relaxed(val, pcie->base + PCIE_CONF_LINK2_CTL_STS);
}
/* Set class code */
val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI_NORMAL);
writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);
/* Mask all INTx interrupts */
val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val &= ~PCIE_INTX_ENABLE;
writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
/* Disable DVFSRC voltage request */
val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
/* * Airoha EN7581 has a hw bug asserting/releasing PCIE_PE_RSTB signal * causing occasional PCIe link down. In order to overcome the issue, * PCIE_RSTB signals are not asserted/released at this stage and the * PCIe block is reset using en7523_reset_assert() and * en7581_pci_enable().
*/ if (!(pcie->soc->flags & SKIP_PCIE_RSTB)) { /* Assert all reset signals */
val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB |
PCIE_PE_RSTB;
writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
/* * Described in PCIe CEM specification revision 6.0. * * The deassertion of PERST# should be delayed 100ms (TPVPERL) * for the power and clock to become stable.
*/
msleep(PCIE_T_PVPERL_MS);
/* Check if the link is up or not */
err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
!!(val & PCIE_PORT_LINKUP), 20,
PCI_PM_D3COLD_WAIT * USEC_PER_MSEC); if (err) { constchar *ltssm_state; int ltssm_index;
raw_spin_lock_irqsave(&pcie->irq_lock, flags);
val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
/** * mtk_intx_eoi() - Clear INTx IRQ status at the end of interrupt * @data: pointer to chip specific data * * As an emulated level IRQ, its interrupt status will remain * until the corresponding de-assert message is received; hence that * the status can only be cleared when the interrupt has been serviced.
*/ staticvoid mtk_intx_eoi(struct irq_data *data)
{ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data); unsignedlong hwirq;
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac"); if (!regs) return -EINVAL;
pcie->base = devm_ioremap_resource(dev, regs); if (IS_ERR(pcie->base)) {
dev_err(dev, "failed to map register base\n"); return PTR_ERR(pcie->base);
}
pcie->reg_base = regs->start;
for (i = 0; i < num_resets; i++)
pcie->phy_resets[i].id = pcie->soc->phy_resets.id[i];
ret = devm_reset_control_bulk_get_optional_shared(dev, num_resets,
pcie->phy_resets); if (ret) {
dev_err(dev, "failed to get PHY bulk reset\n"); return ret;
}
pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac"); if (IS_ERR(pcie->mac_reset)) {
ret = PTR_ERR(pcie->mac_reset); if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get MAC reset\n");
return ret;
}
pcie->phy = devm_phy_optional_get(dev, "pcie-phy"); if (IS_ERR(pcie->phy)) {
ret = PTR_ERR(pcie->phy); if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get PHY\n");
return ret;
}
pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks); if (pcie->num_clks < 0) {
dev_err(dev, "failed to get clocks\n"); return pcie->num_clks;
}
ret = of_property_read_u32(dev->of_node, "num-lanes", &num_lanes); if (ret == 0) { if (num_lanes == 0 || num_lanes > 16 ||
(num_lanes != 1 && num_lanes % 2))
dev_warn(dev, "invalid num-lanes, using controller defaults\n"); else
pcie->num_lanes = num_lanes;
}
/* * The controller may have been left out of reset by the bootloader * so make sure that we get a clean start by asserting resets here.
*/
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
pcie->phy_resets);
/* Wait for the time needed to complete the reset lines assert. */
msleep(PCIE_EN7581_RESET_TIME_MS);
/* * Configure PBus base address and base address mask to allow the * hw to detect if a given address is accessible on PCIe controller.
*/
pbus_regmap = syscon_regmap_lookup_by_phandle_args(dev->of_node, "mediatek,pbus-csr",
ARRAY_SIZE(args),
args); if (IS_ERR(pbus_regmap)) return PTR_ERR(pbus_regmap);
entry = resource_list_first_type(&host->windows, IORESOURCE_MEM); if (!entry) return -ENODEV;
err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); if (err) {
dev_err(dev, "failed to prepare clock\n"); goto err_clk_prepare_enable;
}
/* * Airoha EN7581 performs PCIe reset via clk callbacks since it has a * hw issue with PCIE_PE_RSTB signal. Add wait for the time needed to * complete the PCIe reset.
*/
msleep(PCIE_T_PVPERL_MS);
/* * The controller may have been left out of reset by the bootloader * so make sure that we get a clean start by asserting resets here.
*/
reset_control_bulk_assert(pcie->soc->phy_resets.num_resets,
pcie->phy_resets);
reset_control_assert(pcie->mac_reset);
usleep_range(PCIE_MTK_RESET_TIME_US, 2 * PCIE_MTK_RESET_TIME_US);
/* PHY power on and enable pipe clock */
err = reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
pcie->phy_resets); if (err) {
dev_err(dev, "failed to deassert PHYs\n"); return err;
}
err = phy_init(pcie->phy); if (err) {
dev_err(dev, "failed to initialize PHY\n"); goto err_phy_init;
}
err = phy_power_on(pcie->phy); if (err) {
dev_err(dev, "failed to power on PHY\n"); goto err_phy_on;
}
/* MAC power on and enable transaction layer clocks */
reset_control_deassert(pcie->mac_reset);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks); if (err) {
dev_err(dev, "failed to enable clocks\n"); goto err_clk_init;
}
/*
 * mtk_pcie_get_controller_max_link_speed() - Read the highest PCIe
 * generation the controller hardware supports
 * @pcie: PCIe controller private data
 *
 * The PCIE_BASE_CFG_SPEED field is a bitmask of supported generations;
 * the index of its most significant set bit is the maximum Gen number.
 *
 * Return: maximum supported generation (>= 1) on success, -EINVAL if the
 * hardware reports no supported speed bits.
 */
static int mtk_pcie_get_controller_max_link_speed(struct mtk_gen3_pcie *pcie)
{
	u32 cfg;
	int gen;

	cfg = readl_relaxed(pcie->base + PCIE_BASE_CFG_REG);
	gen = fls(FIELD_GET(PCIE_BASE_CFG_SPEED, cfg));

	if (gen <= 0)
		return -EINVAL;

	return gen;
}
staticint mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{ int err, max_speed;
err = mtk_pcie_parse_port(pcie); if (err) return err;
/* * Deassert the line in order to avoid unbalance in deassert_count * counter since the bulk is shared.
*/
reset_control_bulk_deassert(pcie->soc->phy_resets.num_resets,
pcie->phy_resets);
/* Don't touch the hardware registers before power up */
err = pcie->soc->power_up(pcie); if (err) return err;
err = of_pci_get_max_link_speed(pcie->dev->of_node); if (err) { /* Get the maximum speed supported by the controller */
max_speed = mtk_pcie_get_controller_max_link_speed(pcie);
/* Set max_link_speed only if the controller supports it */ if (max_speed >= 0 && max_speed <= err) {
pcie->max_link_speed = err;
dev_info(pcie->dev, "maximum controller link speed Gen%d, overriding to Gen%u",
max_speed, pcie->max_link_speed);
}
}
/* Try link up */
err = mtk_pcie_startup_port(pcie); if (err) goto err_setup;
err = mtk_pcie_setup_irq(pcie); if (err) goto err_setup;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.