/*
 * NOTE: You may see the term "BAR" in a number of register names used by
 * this driver. The term is an artifact of when the HW core was an
 * endpoint device (EP). Now it is a root complex (RC) and anywhere a
 * register has the term "BAR" it is related to an inbound window.
 */
/*
 * The RESCAL block is tied to PCIe controller #1, regardless of the number of
 * controllers, and turning off PCIe controller #1 prevents access to the RESCAL
 * register blocks, therefore no other controller can access this register
 * space, and depending upon the bus fabric we may get a timeout (UBUS/GISB),
 * or a hang (AXI).
 */
#define CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN		BIT(0)
/* Per-controller MSI state for the internal MSI controller. */
struct brcm_msi {
	struct device		*dev;
	void __iomem		*base;
	struct device_node	*np;
	struct irq_domain	*inner_domain;
	struct mutex		lock;		/* guards the alloc/free operations */
	u64			target_addr;
	int			irq;
	DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR);
	bool			legacy;
	/* Some chips have MSIs in bits [31..24] of a shared register. */
	int			legacy_shift;
	int			nr;		/* No. of MSI available, depends on chip */
	/* This is the base pointer for interrupt status/set/clr regs */
	void __iomem		*intr_base;
};
/*
 * This is to convert the size of the inbound "BAR" region to the
 * non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE.
 *
 * Returns the HW encoding for @size, or 0 (window disabled) when the
 * size is outside the supported 4KB..64GB range.
 */
static int brcm_pcie_encode_ibar_size(u64 size)
{
	int log2_in = ilog2(size);

	if (log2_in >= 12 && log2_in <= 15)
		/* Covers 4KB to 32KB (inclusive) */
		return (log2_in - 12) + 0x1c;
	else if (log2_in >= 16 && log2_in <= 36)
		/* Covers 64KB to 64GB, (inclusive) */
		return log2_in - 15;

	/* Something is awry so disable */
	return 0;
}
/*
 * NOTE(review): this region is a splice of several unrelated fragments —
 * the head of an MDIO packet-forming helper, outbound-window address
 * programming, the tail of an MSI IRQ-domain alloc op, MSI BAR config,
 * and a build-time sanity check. The enclosing function bodies are not
 * fully visible here; verify against the complete source before editing.
 */
static u32 brcm_pcie_mdio_form_pkt(int port, int regad, int cmd)
{
u32 pkt = 0;
/* Set the base of the pcie_addr window */
writel(lower_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_LO(win));
writel(upper_32_bits(pcie_addr), pcie->base + PCIE_MEM_WIN0_HI(win));
/* Write the addr base & limit lower bits (in MBs) */
cpu_addr_mb = cpu_addr / SZ_1M;
limit_addr_mb = (cpu_addr + size - 1) / SZ_1M;
/* NOTE(review): installs an edge-triggered handler per allocated hwirq. */
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, (irq_hw_number_t)hwirq + i,
&brcm_msi_bottom_irq_chip, domain->host_data,
handle_edge_irq, NULL, NULL); return 0;
}
/* * The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI * enable, which we set to 1.
*/
writel(lower_32_bits(msi->target_addr) | 0x1,
msi->base + PCIE_MISC_MSI_BAR_CONFIG_LO);
writel(upper_32_bits(msi->target_addr),
msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);
/* * Sanity check to make sure that the 'used' bitmap in struct brcm_msi * is large enough.
*/
BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
/*
 * NOTE(review): spliced fragments — the head of brcm_pcie_rc_mode() runs
 * directly into the bodies of two config-space map_bus helpers. Neither
 * function is complete in this view.
 */
/* The controller is capable of serving in both RC and EP roles */ staticbool brcm_pcie_rc_mode(struct brcm_pcie *pcie)
{ void __iomem *base = pcie->base;
u32 val = readl(base + PCIE_MISC_PCIE_STATUS);
/* Accesses to the RC go right to the RC registers if !devfn */ if (pci_is_root_bus(bus)) return devfn ? NULL : base + PCIE_ECAM_REG(where);
/* An access to our HW w/o link-up will cause a CPU Abort */ if (!brcm_pcie_link_up(pcie)) return NULL;
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
writel(idx, base + IDX_ADDR(pcie)); return base + DATA_ADDR(pcie) + PCIE_ECAM_REG(where);
}
/* NOTE(review): second map_bus variant — presumably the register offset
 * is folded into the ECAM index rather than the returned pointer; confirm
 * against the full source. */
/* Accesses to the RC go right to the RC registers if !devfn */ if (pci_is_root_bus(bus)) return devfn ? NULL : base + PCIE_ECAM_REG(where);
/* An access to our HW w/o link-up will cause a CPU Abort */ if (!brcm_pcie_link_up(pcie)) return NULL;
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, where);
writel(idx, base + IDX_ADDR(pcie)); return base + DATA_ADDR(pcie);
}
/*
 * NOTE(review): generic bridge soft-init control via an optional reset
 * line. The tail of this function is not visible; the final three
 * statements below (PERSTB bit handling) appear to belong to a different
 * PERST# helper that was spliced in — confirm against the full source.
 */
staticint brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT; int ret = 0;
if (pcie->bridge_reset) { if (val)
ret = reset_control_assert(pcie->bridge_reset); else
ret = reset_control_deassert(pcie->bridge_reset);
if (ret)
dev_err(pcie->dev, "failed to %s 'bridge' reset, err=%d\n",
val ? "assert" : "deassert", ret);
/* Perst bit has moved and assert value is 0 */
tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
/*
 * NOTE(review): fragment of the inbound-window ("BAR") discovery and
 * programming code (dma-ranges walk, memc sizing, window validation and
 * register writes). Loop and function boundaries are not fully visible.
 */
/* * The HW registers (and PCIe) use order-1 numbering for BARs. As such, * we have inbound_wins[0] unused and BAR1 starts at inbound_wins[1].
*/ struct inbound_win *b_begin = &inbound_wins[1]; struct inbound_win *b = b_begin;
/* * STB chips beside 7712 disable the first inbound window default. * Rather being mapped to system memory it is mapped to the * internal registers of the SoC. This feature is deprecated, has * security considerations, and is not implemented in our modern * SoCs.
*/ if (pcie->cfg->soc_base != BCM7712)
add_inbound_win(b++, &n, 0, 0, 0);
/* NOTE(review): body of a dma-ranges iteration — accumulates total size
 * and tracks the lowest pcie address seen. */
size = resource_size(entry->res);
tot_size += size; if (pcie_start < lowest_pcie_addr)
lowest_pcie_addr = pcie_start; /* * 7712 and newer chips may have many BARs, with each * offering a non-overlapping viewport to system memory. * That being said, each BARs size must still be a power of * two.
*/ if (pcie->cfg->soc_base == BCM7712)
add_inbound_win(b++, &n, size, cpu_start, pcie_start);
if (n > pcie->cfg->num_inbound_wins) break;
}
if (lowest_pcie_addr == ~(u64)0) {
dev_err(dev, "DT node has no dma-ranges\n"); return -EINVAL;
}
/* * 7712 and newer chips do not have an internal memory mapping system * that enables multiple memory controllers. As such, it can return * now w/o doing special configuration.
*/ if (pcie->cfg->soc_base == BCM7712) return n;
ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
PCIE_BRCM_MAX_MEMC); if (ret <= 0) { /* Make an educated guess */
pcie->num_memc = 1;
pcie->memc_size[0] = 1ULL << fls64(tot_size - 1);
} else {
pcie->num_memc = ret;
}
/* Each memc is viewed through a "port" that is a power of 2 */ for (i = 0, size = 0; i < pcie->num_memc; i++)
size += pcie->memc_size[i];
/* Our HW mandates that the window size must be a power of 2 */
size = 1ULL << fls64(size - 1);
/* * For STB chips, the BAR2 cpu_addr is hardwired to the start * of system memory, so we set it to 0.
*/
cpu_addr = 0;
pci_offset = lowest_pcie_addr;
/* * We validate the inbound memory view even though we should trust * whatever the device-tree provides. This is because of an HW issue on * early Raspberry Pi 4's revisions (bcm2711). It turns out its * firmware has to dynamically edit dma-ranges due to a bug on the * PCIe controller integration, which prohibits any access above the * lower 3GB of memory. Given this, we decided to keep the dma-ranges * in check, avoiding hard to debug device-tree related issues in the * future: * * The PCIe host controller by design must set the inbound viewport to * be a contiguous arrangement of all of the system's memory. In * addition, its size must be a power of two. To further complicate * matters, the viewport must start on a pcie-address that is aligned * on a multiple of its size. If a portion of the viewport does not * represent system memory -- e.g. 3GB of memory requires a 4GB * viewport -- we can map the outbound memory in or after 3GB and even * though the viewport will overlap the outbound memory the controller * will know to send outbound memory downstream and everything else * upstream. * * For example: * * - The best-case scenario, memory up to 3GB, is to place the inbound * region in the first 4GB of pcie-space, as some legacy devices can * only address 32bits. We would also like to put the MSI under 4GB * as well, since some devices require a 32bit MSI target address. * * - If the system memory is 4GB or larger we cannot start the inbound * region at location 0 (since we have to allow some space for * outbound memory @ 3GB). So instead it will start at the 1x * multiple of its size
*/ if (!size || (pci_offset & (size - 1)) ||
(pci_offset < SZ_4G && pci_offset > SZ_2G)) {
dev_err(dev, "Invalid inbound_win2_offset/size: size 0x%llx, off 0x%llx\n",
size, pci_offset); return -EINVAL;
}
/* Enable inbound window 2, the main inbound window for STB chips */
add_inbound_win(b++, &n, size, cpu_addr, pci_offset);
/* * Disable inbound window 3. On some chips presents the same * window as #2 but the data appears in a settable endianness.
*/
add_inbound_win(b++, &n, 0, 0, 0);
/* Write low */
writel_relaxed(tmp, base + reg_offset); /* Write high */
writel_relaxed(upper_32_bits(pci_offset), base + reg_offset + 4);
/* * Most STB chips: * Do nothing. * 7712: * All of their BARs need to be set.
*/ if (pcie->cfg->soc_base == BCM7712) { /* BUS remap register settings */
reg_offset = brcm_ubus_reg_offset(i);
tmp = lower_32_bits(cpu_addr) & ~0xfff;
tmp |= PCIE_MISC_UBUS_BAR1_CONFIG_REMAP_ACCESS_EN_MASK;
writel_relaxed(tmp, base + reg_offset);
tmp = upper_32_bits(cpu_addr);
writel_relaxed(tmp, base + reg_offset + 4);
}
}
}
/*
 * NOTE(review): fragment of controller setup — bridge reset cycle,
 * SerDes power-up, SCB burst-size selection and MSI target address
 * choice. The enclosing function head/tail are not visible here.
 */
/* Reset the bridge */
ret = pcie->cfg->bridge_sw_init_set(pcie, 1); if (ret) return ret;
/* Ensure that PERST# is asserted; some bootloaders may deassert it. */ if (pcie->cfg->soc_base == BCM2711) {
ret = pcie->cfg->perst_set(pcie, 1); if (ret) {
pcie->cfg->bridge_sw_init_set(pcie, 0); return ret;
}
}
usleep_range(100, 200);
/* Take the bridge out of reset */
ret = pcie->cfg->bridge_sw_init_set(pcie, 0); if (ret) return ret;
tmp = readl(base + HARD_DEBUG(pcie)); if (is_bmips(pcie))
tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK; else
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
writel(tmp, base + HARD_DEBUG(pcie)); /* Wait for SerDes to be stable */
usleep_range(100, 200);
/* * SCB_MAX_BURST_SIZE is a two bit field. For GENERIC chips it * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
*/ if (is_bmips(pcie))
burst = 0x1; /* 256 bytes */ elseif (pcie->cfg->soc_base == BCM2711)
burst = 0x0; /* 128 bytes */ elseif (pcie->cfg->soc_base == BCM7278)
burst = 0x3; /* 512 bytes */ else
burst = 0x2; /* 512 bytes */
/* * We ideally want the MSI target address to be located in the 32bit * addressable memory area. Some devices might depend on it. This is * possible either when the inbound window is located above the lower * 4GB or when the inbound area is smaller than 4GB (taking into * account the rounding-up we're forced to perform).
*/ if (inbound_wins[2].pci_offset >= SZ_4G ||
(inbound_wins[2].size + inbound_wins[2].pci_offset) < SZ_4G)
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_LT_4GB; else
pcie->msi_target_addr = BRCM_MSI_TARGET_ADDR_GT_4GB;
/*
 * NOTE(review): fragment of controller setup — ASPM capability
 * advertisement, optional DT link-width override, and RC class-code
 * programming. The enclosing function is not fully visible.
 */
/* Don't advertise L0s capability if 'aspm-no-l0s' */
aspm_support = PCIE_LINK_STATE_L1; if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
aspm_support |= PCIE_LINK_STATE_L0S;
tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
u32p_replace_bits(&tmp, aspm_support,
PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
/* 'tmp' still holds the contents of PRIV1_LINK_CAPABILITY */
num_lanes_cap = u32_get_bits(tmp, PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
num_lanes = 0;
/* * Use hardware negotiated Max Link Width value by default. If the * "num-lanes" DT property is present, assume that the chip's default * link width capability information is incorrect/undesired and use the * specified value instead.
*/ if (!of_property_read_u32(pcie->np, "num-lanes", &num_lanes) &&
num_lanes && num_lanes <= 4 && num_lanes_cap != num_lanes) {
u32p_replace_bits(&tmp, num_lanes,
PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_MAX_LINK_WIDTH_MASK);
writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
tmp = readl(base + PCIE_RC_PL_REG_PHY_CTL_1);
u32p_replace_bits(&tmp, 1,
PCIE_RC_PL_REG_PHY_CTL_1_REG_P2_POWERDOWN_ENA_NOSYNC_MASK);
writel(tmp, base + PCIE_RC_PL_REG_PHY_CTL_1);
}
/* * For config space accesses on the RC, show the right class for * a PCIe-PCIe bridge (the default setting is to be EP mode).
*/
tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
u32p_replace_bits(&tmp, 0x060400,
PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
/*
 * NOTE(review): fragment — outbound-window programming (bmips splits a
 * large window into 128MB chunks), inbound endian-mode selection, and
 * the tail of the setup function (optional post_setup hook, return 0).
 */
/* bmips PCIe outbound windows have a 128MB max size */ if (nwins > BRCM_NUM_PCIE_OUT_WINS)
nwins = BRCM_NUM_PCIE_OUT_WINS; for (j = 0; j < nwins; j++, start += SZ_128M)
brcm_pcie_set_outbound_win(pcie, j, start,
start - entry->offset,
SZ_128M); break;
}
brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
res->start - entry->offset,
resource_size(res));
num_out_wins++;
}
/* PCIe->SCB endian mode for inbound window */
tmp = readl(base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
u32p_replace_bits(&tmp, PCIE_RC_CFG_VENDOR_SPECIFIC_REG1_LITTLE_ENDIAN,
PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1_ENDIAN_MODE_BAR2_MASK);
writel(tmp, base + PCIE_RC_CFG_VENDOR_VENDOR_SPECIFIC_REG1);
if (pcie->cfg->post_setup) {
ret = pcie->cfg->post_setup(pcie); if (ret < 0) return ret;
}
return 0;
}
/*
 * This extends the timeout period for an access to an internal bus. This
 * access timeout may occur during L1SS sleep periods, even without the
 * presence of a PCIe access.
 */
static void brcm_extend_rbus_timeout(struct brcm_pcie *pcie)
{
	/* The TIMEOUT register is two registers before RGR1_SW_INIT_1 */
	const unsigned int REG_OFFSET = PCIE_RGR1_SW_INIT_1(pcie) - 8;
	u32 timeout_us = 4000000; /* 4 seconds, our setting for L1SS */

	/* 7712 does not have this (RGR1) timer */
	if (pcie->cfg->soc_base == BCM7712)
		return;

	/* Each unit in timeout register is 1/216,000,000 seconds */
	writel(216 * timeout_us, pcie->base + REG_OFFSET);
}
/*
 * NOTE(review): fragment of the clkreq-mode configuration function; it
 * starts mid-function (clkreq_cntl/mode/err_msg are defined above this
 * view).
 */
if (strcmp(mode, "no-l1ss") == 0) { /* * "no-l1ss" -- Provides Clock Power Management, L0s, and * L1, but cannot provide L1 substate (L1SS) power * savings. If the downstream device connected to the RC is * L1SS capable AND the OS enables L1SS, all PCIe traffic * may abruptly halt, potentially hanging the system.
*/
clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK; /* * We want to un-advertise L1 substates because if the OS * tries to configure the controller into using L1 substate * power savings it may fail or hang when the RC HW is in * "no-l1ss" mode.
*/
tmp = readl(pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
u32p_replace_bits(&tmp, 2, PCIE_RC_CFG_PRIV1_ROOT_CAP_L1SS_MODE_MASK);
writel(tmp, pcie->base + PCIE_RC_CFG_PRIV1_ROOT_CAP);
} elseif (strcmp(mode, "default") == 0) { /* * "default" -- Provides L0s, L1, and L1SS, but not * compliant to provide Clock Power Management; * specifically, may not be able to meet the Tclron max * timing of 400ns as specified in "Dynamic Clock Control", * section 3.2.5.2.2 of the PCIe spec. This situation is * atypical and should happen only with older devices.
*/
clkreq_cntl |= PCIE_MISC_HARD_PCIE_HARD_DEBUG_L1SS_ENABLE_MASK;
brcm_extend_rbus_timeout(pcie);
} else { /* * "safe" -- No power savings; refclk is driven by RC * unconditionally.
*/ if (strcmp(mode, "safe") != 0)
dev_err(pcie->dev, err_msg);
mode = "safe";
}
writel(clkreq_cntl, pcie->base + HARD_DEBUG(pcie));
dev_info(pcie->dev, "clkreq-mode set to %s\n", mode);
}
/*
 * NOTE(review): spliced fragments — the tail of the link start-up
 * sequence, a subdev-regulator allocation helper body, and the
 * regulator enable/disable paths for downstream devices. None of the
 * enclosing functions is complete in this view.
 */
/* Limit the generation if specified */ if (pcie->gen)
brcm_pcie_set_gen(pcie, pcie->gen);
/* Unassert the fundamental reset */
ret = pcie->cfg->perst_set(pcie, 0); if (ret) return ret;
msleep(PCIE_RESET_CONFIG_WAIT_MS);
/* * Give the RC/EP even more time to wake up, before trying to * configure RC. Intermittently check status for link-up, up to a * total of 100ms.
*/ for (i = 0; i < 100 && !brcm_pcie_link_up(pcie); i += 5)
msleep(5);
if (!brcm_pcie_link_up(pcie)) {
dev_err(dev, "link down\n"); return -ENODEV;
}
brcm_config_clkreq(pcie);
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie); if (ret == 0)
ssc_good = true; else
dev_err(dev, "failed attempt to enter ssc mode\n");
}
/* NOTE(review): body of alloc_subdev_regulators() — fills the supply
 * name table after a zeroed allocation. */
sr = devm_kzalloc(dev, size, GFP_KERNEL); if (sr) {
sr->num_supplies = ARRAY_SIZE(supplies); for (i = 0; i < ARRAY_SIZE(supplies); i++)
sr->supplies[i].supply = supplies[i];
}
/* NOTE(review): only the root port's immediate child bus is handled. */
if (!bus->parent || !pci_is_root_bus(bus->parent)) return 0;
if (dev->of_node) {
sr = alloc_subdev_regulators(dev); if (!sr) {
dev_info(dev, "Can't allocate regulators for downstream device\n"); goto no_regulators;
}
pcie->sr = sr;
ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies); if (ret) {
dev_info(dev, "Did not get regulators, err=%d\n", ret);
pcie->sr = NULL; goto no_regulators;
}
ret = regulator_bulk_enable(sr->num_supplies, sr->supplies); if (ret) {
dev_err(dev, "Can't enable regulators for downstream device\n");
regulator_bulk_free(sr->num_supplies, sr->supplies);
pcie->sr = NULL;
}
}
/* NOTE(review): start of the matching disable path (different function). */
if (!sr || !bus->parent || !pci_is_root_bus(bus->parent)) return;
if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
dev_err(dev, "Failed to disable regulators for downstream device\n");
regulator_bulk_free(sr->num_supplies, sr->supplies);
pcie->sr = NULL;
}
/*
 * L23 is a low-power PCIe link state. Request entry into L23 and poll
 * the status register for up to ~36 msec; log an error if the link
 * never reaches L23.
 */
static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
{
	void __iomem *base = pcie->base;
	int l23, i;
	u32 tmp;

	/* Assert request for L23 */
	tmp = readl(base + PCIE_MISC_PCIE_CTRL);
	u32p_replace_bits(&tmp, 1, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
	writel(tmp, base + PCIE_MISC_PCIE_CTRL);

	/* Wait up to 36 msec for L23 */
	tmp = readl(base + PCIE_MISC_PCIE_STATUS);
	l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK, tmp);
	for (i = 0; i < 15 && !l23; i++) {
		usleep_range(2000, 2400);
		tmp = readl(base + PCIE_MISC_PCIE_STATUS);
		l23 = FIELD_GET(PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK,
				tmp);
	}

	if (!l23)
		dev_err(pcie->dev, "failed to enter low-power link state\n");
}
/*
 * NOTE(review): spliced fragments — the body of brcm_pcie_turn_off()
 * (L23 entry, PERST# assert, SerDes power-down, optional bridge
 * shutdown), the tail of a pci_walk_bus() wakeup callback, and the
 * suspend/resume paths. None of the enclosing functions is complete.
 */
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie); /* Assert fundamental reset */
ret = pcie->cfg->perst_set(pcie, 1); if (ret) return ret;
/* Deassert request for L23 in case it was asserted */
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
u32p_replace_bits(&tmp, 0, PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK);
writel(tmp, base + PCIE_MISC_PCIE_CTRL);
/* Turn off SerDes */
tmp = readl(base + HARD_DEBUG(pcie));
u32p_replace_bits(&tmp, 1, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
writel(tmp, base + HARD_DEBUG(pcie));
if (!(pcie->cfg->quirks & CFG_QUIRK_AVOID_BRIDGE_SHUTDOWN)) /* Shutdown PCIe bridge */
ret = pcie->cfg->bridge_sw_init_set(pcie, 1);
/* NOTE(review): pci_walk_bus() callback fragment — 'ret' here is a
 * bool pointer used as the accumulator, not an errno. */
if (device_may_wakeup(&dev->dev)) {
*ret = true;
dev_info(&dev->dev, "Possible wake-up device; regulators will not be disabled\n");
} return (int) *ret;
}
/* NOTE(review): suspend path begins here. */
ret = brcm_pcie_turn_off(pcie); if (ret) return ret;
/* * If brcm_phy_stop() returns an error, just dev_err(). If we * return the error it will cause the suspend to fail and this is a * forgivable offense that will probably be erased on resume.
*/ if (brcm_phy_stop(pcie))
dev_err(dev, "Could not stop phy for suspend\n");
ret = reset_control_rearm(pcie->rescal); if (ret) {
dev_err(dev, "Could not rearm rescal reset\n"); return ret;
}
if (pcie->sr) { /* * Now turn off the regulators, but if at least one * downstream device is enabled as a wake-up source, do not * turn off regulators.
*/
pcie->ep_wakeup_capable = false;
pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
&pcie->ep_wakeup_capable); if (!pcie->ep_wakeup_capable) {
ret = regulator_bulk_disable(pcie->sr->num_supplies,
pcie->sr->supplies); if (ret) {
dev_err(dev, "Could not turn off regulators\n");
rret = reset_control_reset(pcie->rescal); if (rret)
dev_err(dev, "failed to reset 'rascal' controller ret=%d\n",
rret); return ret;
}
}
}
clk_disable_unprepare(pcie->clk);
/* NOTE(review): resume path begins here. */
ret = brcm_pcie_setup(pcie); if (ret) goto err_reset;
if (pcie->sr) { if (pcie->ep_wakeup_capable) { /* * We are resuming from a suspend. In the suspend we * did not disable the power supplies, so there is * no need to enable them (and falsely increase their * usage count).
*/
pcie->ep_wakeup_capable = false;
} else {
ret = regulator_bulk_enable(pcie->sr->num_supplies,
pcie->sr->supplies); if (ret) {
dev_err(dev, "Could not turn on regulators\n"); goto err_reset;
}
}
}
ret = brcm_pcie_start_link(pcie); if (ret) goto err_regulator;
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
return 0;
err_regulator: if (pcie->sr)
regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_reset:
rret = reset_control_rearm(pcie->rescal); if (rret)
dev_err(pcie->dev, "failed to rearm 'rescal' reset, err=%d\n", rret);
err_disable_clk:
clk_disable_unprepare(pcie->clk); return ret;
}
/*
 * Common teardown: remove the MSI controller, power the link/PHY down,
 * rearm the shared rescal reset, and gate the clock. Errors from the
 * PHY stop and reset rearm are logged but not propagated, since removal
 * must proceed regardless.
 */
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
	brcm_msi_remove(pcie);
	brcm_pcie_turn_off(pcie);
	if (brcm_phy_stop(pcie))
		dev_err(pcie->dev, "Could not stop phy\n");
	if (reset_control_rearm(pcie->rescal))
		dev_err(pcie->dev, "Could not rearm rescal reset\n");
	clk_disable_unprepare(pcie->clk);
}
/*
 * NOTE(review): fragment of the probe path — reset-line acquisition,
 * clock enable, swinit/rescal reset sequencing, PHY start, controller
 * setup, HW-revision gating and internal-MSI probing. The function head
 * and tail are not visible here.
 */
pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal"); if (IS_ERR(pcie->rescal)) return PTR_ERR(pcie->rescal);
pcie->perst_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "perst"); if (IS_ERR(pcie->perst_reset)) return PTR_ERR(pcie->perst_reset);
pcie->bridge_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "bridge"); if (IS_ERR(pcie->bridge_reset)) return PTR_ERR(pcie->bridge_reset);
pcie->swinit_reset = devm_reset_control_get_optional_exclusive(&pdev->dev, "swinit"); if (IS_ERR(pcie->swinit_reset)) return PTR_ERR(pcie->swinit_reset);
ret = clk_prepare_enable(pcie->clk); if (ret) return dev_err_probe(&pdev->dev, ret, "could not enable clock\n");
pcie->cfg->bridge_sw_init_set(pcie, 0);
if (pcie->swinit_reset) {
ret = reset_control_assert(pcie->swinit_reset); if (ret) {
clk_disable_unprepare(pcie->clk); return dev_err_probe(&pdev->dev, ret, "could not assert reset 'swinit'\n");
}
/* HW team recommends 1us for proper sync and propagation of reset */
udelay(1);
ret = reset_control_deassert(pcie->swinit_reset); if (ret) {
clk_disable_unprepare(pcie->clk); return dev_err_probe(&pdev->dev, ret, "could not de-assert reset 'swinit'\n");
}
}
ret = reset_control_reset(pcie->rescal); if (ret) {
clk_disable_unprepare(pcie->clk); return dev_err_probe(&pdev->dev, ret, "failed to deassert 'rescal'\n");
}
ret = brcm_phy_start(pcie); if (ret) {
reset_control_rearm(pcie->rescal);
clk_disable_unprepare(pcie->clk); return ret;
}
ret = brcm_pcie_setup(pcie); if (ret) goto fail;
pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION); if (pcie->cfg->soc_base == BCM4908 &&
pcie->hw_rev >= BRCM_PCIE_HW_REV_3_20) {
dev_err(pcie->dev, "hardware revision with unsupported PERST# setup\n");
ret = -ENODEV; goto fail;
}
/* NOTE(review): 'ret' is only re-assigned when msi_np matches; it relies
 * on being 0 from the successful calls above — confirm against caller. */
if (pci_msi_enabled()) { struct device_node *msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
if (msi_np == pcie->np)
ret = brcm_pcie_enable_msi(pcie);
of_node_put(msi_np);
if (ret) {
dev_err(pcie->dev, "probe of internal MSI failed"); goto fail;
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.