/**
 * struct iproc_pcie_ib_map - iProc PCIe inbound mapping controller-specific
 * parameters
 * @type: inbound mapping region type
 * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or
 * SZ_1G
 * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or
 * GB, depending on the size unit
 * @nr_sizes: number of supported inbound mapping region sizes
 * @nr_windows: number of supported inbound mapping windows for the region
 * @imap_addr_offset: register offset between the upper and lower 32-bit
 * IMAP address registers
 * @imap_window_offset: register offset between each IMAP window
 */
struct iproc_pcie_ib_map {
	enum iproc_pcie_ib_map_type type;
	unsigned int size_unit;
	resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES];
	unsigned int nr_sizes;
	unsigned int nr_windows;
	u16 imap_addr_offset;
	u16 imap_window_offset;
};
/*
 * NOTE(review): extraction artifact — this register-index enum is visibly
 * truncated: there is no closing "};" before the next definition begins,
 * so the remaining register entries were lost. Recover from upstream
 * before building — TODO confirm against the original file.
 */
/* * iProc PCIe host registers
 */ enum iproc_pcie_reg { /* clock/reset signal control */
IPROC_PCIE_CLK_CTRL = 0,
/* * To allow MSI to be steered to an external MSI controller (e.g., ARM * GICv3 ITS)
 */
IPROC_PCIE_MSI_GIC_MODE,
/* * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the * window where the MSI posted writes are written, for the writes to be * interpreted as MSI writes.
 */
IPROC_PCIE_MSI_BASE_ADDR,
IPROC_PCIE_MSI_WINDOW_SIZE,
/* * To hold the address of the register where the MSI writes are * programmed. When ARM GICv3 ITS is used, this should be programmed * with the address of the GITS_TRANSLATER register.
 */
IPROC_PCIE_MSI_ADDR_LO,
IPROC_PCIE_MSI_ADDR_HI,
/* enable MSI */
IPROC_PCIE_MSI_EN_CFG,
/* allow access to root complex configuration space */
IPROC_PCIE_CFG_IND_ADDR,
IPROC_PCIE_CFG_IND_DATA,
/* allow access to device configuration space */
IPROC_PCIE_CFG_ADDR,
IPROC_PCIE_CFG_DATA,
/* * List of device IDs of controllers that have corrupted capability list that * require SW fixup
*/ staticconst u16 iproc_pcie_corrupt_cap_did[] = {
0x16cd,
0x16f0,
0xd802,
0xd804
};
/*
 * APB error forwarding can be disabled during access of configuration
 * registers of the endpoint device, to prevent unsupported requests
 * (typically seen during enumeration with multi-function devices) from
 * triggering a system exception.
 */
static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus,
					      bool disable)
{
	struct iproc_pcie *pcie = iproc_data(bus);
	u32 val;

	/* only for EP accesses (bus > 0) and only if the HW supports it */
	if (bus->number && pcie->has_apb_err_disable) {
		val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN);
		if (disable)
			val &= ~APB_ERR_EN;
		else
			val |= APB_ERR_EN;
		iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val);
	}
}
/*
 * NOTE(review): extraction artifact — the enclosing function header was
 * lost; upstream this is the tail of iproc_pcie_cfg_retry(), whose missing
 * prologue declares 'pcie', 'cfg_data_p', 'status', 'timeout' and 'data'.
 * TODO: restore the function header from the original file.
 */
/* * As per PCIe r6.0, sec 2.3.2, Config RRS Software Visibility only * affects config reads of the Vendor ID. For config writes or any * other config reads, the Root may automatically reissue the * configuration request again as a new request. * * For config reads, this hardware returns CFG_RETRY_STATUS data * when it receives a RRS completion, regardless of the address of * the read or the RRS Software Visibility Enable bit. As a * partial workaround for this, we retry in software any read that * returns CFG_RETRY_STATUS. * * Note that a non-Vendor ID config register may have a value of * CFG_RETRY_STATUS. If we read that, we can't distinguish it from * a RRS completion, so we will incorrectly retry the read and * eventually return the wrong data (0xffffffff).
 */
data = readl(cfg_data_p); while (data == CFG_RETRY_STATUS && timeout--) { /* * RRS state is set in CFG_RD status register * This will handle the case where CFG_RETRY_STATUS is * valid config data.
 */
status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS); if (status != CFG_RD_RRS) return data;
udelay(1);
data = readl(cfg_data_p);
}
/* retries exhausted: report all-ones, as for a failed config read */
if (data == CFG_RETRY_STATUS)
data = 0xffffffff;
return data;
}
/*
 * Fix up config-read values for controllers with corrupted capability
 * lists: detect affected device IDs on the Vendor ID read, then rewrite
 * the PM and PCIe capability registers so enumeration sees a sane,
 * terminated capability chain.
 *
 * NOTE(review): the extracted text had lost the switch statement and the
 * PCI_VENDOR_ID case (leaving 'dev_id' read uninitialized and orphan case
 * labels); reconstructed per the upstream driver.
 */
static void iproc_pcie_fix_cap(struct iproc_pcie *pcie, int where, u32 *val)
{
	u32 i, dev_id;

	switch (where & ~0x3) {
	case PCI_VENDOR_ID:
		/* device ID lives in the upper 16 bits of the dword */
		dev_id = *val >> 16;

		/*
		 * Activate fixup for those controllers that have corrupted
		 * capability list registers
		 */
		for (i = 0; i < ARRAY_SIZE(iproc_pcie_corrupt_cap_did); i++)
			if (dev_id == iproc_pcie_corrupt_cap_did[i])
				pcie->fix_paxc_cap = true;
		break;

	case IPROC_PCI_PM_CAP:
		if (pcie->fix_paxc_cap) {
			/* advertise PM, force next capability to PCIe */
			*val &= ~IPROC_PCI_PM_CAP_MASK;
			*val |= IPROC_PCI_EXP_CAP << 8 | PCI_CAP_ID_PM;
		}
		break;

	case IPROC_PCI_EXP_CAP:
		if (pcie->fix_paxc_cap) {
			/* advertise root port, version 2, terminate here */
			*val = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2) << 16 |
				PCI_CAP_ID_EXP;
		}
		break;

	case IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL:
		/* Don't advertise RRS SV support */
		*val &= ~(PCI_EXP_RTCAP_RRS_SV << 16);
		break;

	default:
		break;
	}
}
/*
 * Config read for the iProc PCIe host: root-complex (bus 0) reads go
 * through the generic accessor with capability-list fixup applied; EP
 * reads go through the indirect config window with software RRS retry.
 *
 * NOTE(review): the extracted text had dropped the EP access path
 * (mapping the config window and reading through it), leaving unbalanced
 * braces and unused locals; reconstructed per the upstream driver —
 * confirm against the original file.
 */
static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 *val)
{
	struct iproc_pcie *pcie = iproc_data(bus);
	unsigned int busno = bus->number;
	void __iomem *cfg_data_p;
	unsigned int data;
	int ret;

	/* root complex access */
	if (busno == 0) {
		ret = pci_generic_config_read32(bus, devfn, where, size, val);
		if (ret == PCIBIOS_SUCCESSFUL)
			iproc_pcie_fix_cap(pcie, where, val);

		return ret;
	}

	cfg_data_p = iproc_pcie_map_cfg_bus(pcie, busno, devfn, where);
	if (!cfg_data_p)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* read with software retry on RRS completions */
	data = iproc_pcie_cfg_retry(pcie, cfg_data_p);

	*val = data;
	if (size <= 2)
		*val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	/*
	 * For PAXC and PAXCv2, the total number of PFs that one can enumerate
	 * depends on the firmware configuration. Unfortunately, due to an ASIC
	 * bug, unconfigured PFs cannot be properly hidden from the root
	 * complex. As a result, write access to these PFs will cause bus lock
	 * up on the embedded processor.
	 *
	 * Since all unconfigured PFs are left with an incorrect, staled device
	 * ID of 0x168e (PCI_DEVICE_ID_NX2_57810), we try to catch those access
	 * early here and reject them all.
	 */
#define DEVICE_ID_MASK     0xffff0000
#define DEVICE_ID_SHIFT    16
	if (pcie->rej_unconfig_pf &&
	    (where & CFG_ADDR_REG_NUM_MASK) == PCI_VENDOR_ID)
		if ((*val & DEVICE_ID_MASK) ==
		    (PCI_DEVICE_ID_NX2_57810 << DEVICE_ID_SHIFT))
			return PCIBIOS_FUNC_NOT_SUPPORTED;

	return PCIBIOS_SUCCESSFUL;
}
/*
 * NOTE(review): extraction artifact — "staticvoid"/"unsignedint" keywords
 * are run together, and the function body appears truncated after the
 * root-complex branch: upstream continues with the EP-device config
 * window setup before returning. TODO: restore from the original file.
 */
/* * Note access to the configuration registers are protected at the higher layer * by 'pci_lock' in drivers/pci/access.c
 */ staticvoid __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, int busno, unsignedint devfn, int where)
{
u16 offset;
/* root complex access */ if (busno == 0) { if (PCIE_ECAM_DEVFN(devfn) > 0) return NULL;
iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
where & CFG_IND_ADDR_MASK);
offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); if (iproc_pcie_reg_is_invalid(offset)) return NULL; else return (pcie->base + offset);
}
/*
 * NOTE(review): fragment — the function header was lost in extraction;
 * upstream this is the body of iproc_pcie_perst_ctrl(pcie, assert), which
 * asserts/deasserts PERST# to the endpoint via the clock/reset control
 * register. 'assert' and 'val' are declared in the missing prologue.
 */
/* * PAXC and the internal emulated endpoint device downstream should not * be reset. If firmware has been loaded on the endpoint device at an * earlier boot stage, reset here causes issues.
 */ if (pcie->ep_is_internal) return;
if (assert) {
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST &
~RC_PCIE_RST_OUTPUT;
iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
udelay(250);
} else {
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL);
val |= RC_PCIE_RST_OUTPUT;
iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val);
msleep(100);
}
}
/*
 * NOTE(review): extraction artifact — iproc_pcie_shutdown() below is
 * missing its "return 0;" and closing brace; the code from the
 * ep_is_internal check onward belongs to a separate link-check function
 * upstream ('val', 'dev', 'hdr_type', 'class', 'link_status' and
 * 'link_is_active' are declared in that function's missing prologue).
 */
int iproc_pcie_shutdown(struct iproc_pcie *pcie)
{
iproc_pcie_perst_ctrl(pcie, true);
msleep(500);
/* * PAXC connects to emulated endpoint devices directly and does not * have a Serdes. Therefore skip the link detection logic here.
 */ if (pcie->ep_is_internal) return 0;
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS); if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) {
dev_err(dev, "PHY or data link is INACTIVE!\n"); return -ENODEV;
}
/* make sure we are not in EP mode */
iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type); if ((hdr_type & PCI_HEADER_TYPE_MASK) != PCI_HEADER_TYPE_BRIDGE) {
dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type); return -EFAULT;
}
/* force class to PCI_CLASS_BRIDGE_PCI_NORMAL (0x060400) */ #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c #define PCI_BRIDGE_CTRL_REG_CLASS_MASK 0xffffff
iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
4, &class); class &= ~PCI_BRIDGE_CTRL_REG_CLASS_MASK; class |= PCI_CLASS_BRIDGE_PCI_NORMAL;
iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET,
4, class);
/* check link status to see if link is active */
iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA,
2, &link_status); if (link_status & PCI_EXP_LNKSTA_NLW)
link_is_active = true;
/*
 * NOTE(review): fragment — header lost in extraction; upstream this is
 * the body of iproc_pcie_ob_write(), which programs one OARR/OMAP
 * outbound window pair. 'oarr_offset'/'omap_offset' and the parameters
 * ('window_idx', 'size_idx', 'axi_addr', 'pci_addr') come from the
 * missing prologue.
 */
/* * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based * on window index.
 */
oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0,
window_idx));
omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0,
window_idx)); if (iproc_pcie_reg_is_invalid(oarr_offset) ||
iproc_pcie_reg_is_invalid(omap_offset)) return -EINVAL;
/* * Program the OARR registers. The upper 32-bit OARR register is * always right after the lower 32-bit OARR register.
 */
writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) |
OARR_VALID, pcie->base + oarr_offset);
writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4);
/* now program the OMAP registers */
writel(lower_32_bits(pci_addr), pcie->base + omap_offset);
writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4);
/*
 * NOTE(review): fragment — header lost in extraction; upstream this is
 * the body of iproc_pcie_setup_ob(), which carves an outbound range into
 * OARR/OMAP windows, largest window size first. "conststruct" is an
 * extraction artifact for "const struct".
 */
if (axi_addr < ob->axi_offset) {
dev_err(dev, "axi address %pap less than offset %pap\n",
&axi_addr, &ob->axi_offset); return -EINVAL;
}
/* * Translate the AXI address to the internal address used by the iProc * PCIe core before programming the OARR
 */
axi_addr -= ob->axi_offset;
/* iterate through all OARR/OMAP mapping windows */ for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) { conststruct iproc_pcie_ob_map *ob_map =
&pcie->ob_map[window_idx];
/* * If current outbound window is already in use, move on to the * next one.
 */ if (iproc_pcie_ob_is_valid(pcie, window_idx)) continue;
/* * Iterate through all supported window sizes within the * OARR/OMAP pair to find a match. Go through the window sizes * in a descending order.
 */ for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0;
size_idx--) {
resource_size_t window_size =
ob_map->window_sizes[size_idx] * SZ_1M;
/* * Keep iterating until we reach the last window and * with the minimal window size at index zero. In this * case, we take a compromise by mapping it using the * minimum window size that can be supported
 */ if (size < window_size) { if (size_idx > 0 || window_idx > 0) continue;
/* * For the corner case of reaching the minimal * window size that can be supported on the * last window
 */
axi_addr = ALIGN_DOWN(axi_addr, window_size);
pci_addr = ALIGN_DOWN(pci_addr, window_size);
size = window_size;
}
if (!IS_ALIGNED(axi_addr, window_size) ||
!IS_ALIGNED(pci_addr, window_size)) {
dev_err(dev, "axi %pap or pci %pap not aligned\n",
&axi_addr, &pci_addr); return -EINVAL;
}
/* * Match found! Program both OARR and OMAP and mark * them as a valid entry.
 */
ret = iproc_pcie_ob_write(pcie, window_idx, size_idx,
axi_addr, pci_addr); if (ret) goto err_ob;
size -= window_size; if (size == 0) return 0;
/* * If we are here, we are done with the current window, * but not yet finished all mappings. Need to move on * to the next window.
 */
axi_addr += window_size;
pci_addr += window_size; break;
}
}
/*
 * NOTE(review): fragment — header lost in extraction; upstream this is
 * the body of iproc_pcie_ib_write(), which programs one IARR inbound
 * region and its IMAP windows. 'iarr_offset', 'imap_offset', 'ib_map',
 * 'nr_windows', 'axi_addr', 'pci_addr' and 'size' come from the missing
 * prologue; note 'imap_offset' does not visibly advance in this loop —
 * the advancing code was presumably lost too.
 */
/* * Program the IARR registers. The upper 32-bit IARR register is * always right after the lower 32-bit IARR register.
 */
writel(lower_32_bits(pci_addr) | BIT(size_idx),
pcie->base + iarr_offset);
writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4);
dev_dbg(dev, "iarr lo 0x%x iarr hi 0x%x\n",
readl(pcie->base + iarr_offset),
readl(pcie->base + iarr_offset + 4));
/* * Now program the IMAP registers. Each IARR region may have one or * more IMAP windows.
 */
size >>= ilog2(nr_windows); for (window_idx = 0; window_idx < nr_windows; window_idx++) {
val = readl(pcie->base + imap_offset);
val |= lower_32_bits(axi_addr) | IMAP_VALID;
writel(val, pcie->base + imap_offset);
writel(upper_32_bits(axi_addr),
pcie->base + imap_offset + ib_map->imap_addr_offset);
dev_dbg(dev, "imap window [%d] lo 0x%x hi 0x%x\n",
window_idx, readl(pcie->base + imap_offset),
readl(pcie->base + imap_offset +
ib_map->imap_addr_offset));
/*
 * NOTE(review): fragment — header lost in extraction; upstream this is
 * the tail of iproc_pcie_setup_ib(), which finds a compatible, free
 * inbound region whose supported size matches the range exactly, then
 * programs it via iproc_pcie_ib_write(). "conststruct" is an extraction
 * artifact for "const struct".
 */
/* iterate through all IARR mapping regions */ for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { conststruct iproc_pcie_ib_map *ib_map =
&pcie->ib_map[region_idx];
/* * If current inbound region is already in use or not a * compatible type, move on to the next.
 */ if (iproc_pcie_ib_is_in_use(pcie, region_idx) ||
!iproc_pcie_ib_check_type(ib_map, type)) continue;
/* iterate through all supported region sizes to find a match */ for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) {
resource_size_t region_size =
ib_map->region_sizes[size_idx] * ib_map->size_unit;
if (size != region_size) continue;
if (!IS_ALIGNED(axi_addr, region_size) ||
!IS_ALIGNED(pci_addr, region_size)) {
dev_err(dev, "axi %pap or pci %pap not aligned\n",
&axi_addr, &pci_addr); return -EINVAL;
}
/* Match found! Program IARR and all IMAP windows. */
ret = iproc_pcie_ib_write(pcie, region_idx, size_idx,
ib_map->nr_windows, axi_addr,
pci_addr, size); if (ret) goto err_ib; else return 0;
}
}
/* no region matched the requested size/alignment */
ret = -EINVAL;
err_ib:
dev_err(dev, "unable to configure inbound mapping\n");
dev_err(dev, "axi %pap, pci %pap, res size %pap\n",
&axi_addr, &pci_addr, &size);
return ret;
}
/*
 * Program one inbound mapping region for each entry in the host bridge's
 * dma-ranges list. Returns 0 on success or the first setup error.
 *
 * NOTE(review): the extracted text had "staticint" run together and was
 * missing the trailing "return ret;" and closing brace (falling off the
 * end of a non-void function); restored here.
 */
static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie)
{
	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
	struct resource_entry *entry;
	int ret = 0;

	resource_list_for_each_entry(entry, &host->dma_ranges) {
		/* Each range entry corresponds to an inbound mapping region */
		ret = iproc_pcie_setup_ib(pcie, entry, IPROC_PCIE_IB_MAP_MEM);
		if (ret)
			break;
	}

	return ret;
}
/*
 * NOTE(review): fragment — function headers lost in extraction; this text
 * merges two upstream MSI-steering routines: one validating the 'msi-map'
 * node and deriving the GITS_TRANSLATER address, and one programming the
 * PAXC MSI steering registers. 'msi_node', 'res', 'dev', 'enable', 'val'
 * and 'msi_addr' are declared in the missing prologues.
 */
/* * Check if 'msi-map' points to ARM GICv3 ITS, which is the only * supported external MSI controller that requires steering.
 */ if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) {
dev_err(dev, "unable to find compatible MSI controller\n"); return -ENODEV;
}
/* derive GITS_TRANSLATER address from GICv3 */
ret = of_address_to_resource(msi_node, 0, &res); if (ret < 0) {
dev_err(dev, "unable to obtain MSI controller resources\n"); return ret;
}
if (!enable) { /* * Disable PAXC MSI steering. All write transfers will be * treated as non-MSI transfers
 */
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
val &= ~MSI_ENABLE_CFG;
iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val); return;
}
/* * Program bits [43:13] of address of GITS_TRANSLATER register into * bits [30:0] of the MSI base address register. In fact, in all iProc * based SoCs, all I/O register bases are well below the 32-bit * boundary, so we can safely assume bits [43:32] are always zeros.
 */
iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR,
(u32)(msi_addr >> 13));
/* use a default 8K window size */
iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0);
/* steering MSI to GICv3 ITS */
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE);
val |= GIC_V3_CFG;
iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val);
/* * Program bits [43:2] of address of GITS_TRANSLATER register into the * iProc MSI address registers.
 */
msi_addr >>= 2;
iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI,
upper_32_bits(msi_addr));
iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO,
lower_32_bits(msi_addr));
/* enable MSI */
val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG);
val |= MSI_ENABLE_CFG;
iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val);
}
/*
 * NOTE(review): fragment — header lost in extraction; upstream this is
 * part of the MSI enable path: optional steering toward an external MSI
 * controller, then an attempt to bring up the iProc internal MSI support.
 * 'msi_node', 'ret' and the out_put_node label live in the missing parts.
 */
/* * Certain revisions of the iProc PCIe controller require additional * configurations to steer the MSI writes towards an external MSI * controller.
 */ if (pcie->need_msi_steer) {
ret = iproc_pcie_msi_steer(pcie, msi_node); if (ret) goto out_put_node;
}
/* * If another MSI controller is being used, the call below should fail * but that is okay
 */
ret = iproc_msi_init(pcie, msi_node);
/*
 * NOTE(review): truncated — only the opening of this fixup survived
 * extraction ("staticvoid" is an artifact for "static void"); the body
 * that actually disables MSI parsing is missing. Restore from the
 * original file.
 */
/* * The MSI parsing logic in certain revisions of Broadcom PAXC based root * complex does not work and needs to be disabled
 */ staticvoid quirk_paxc_disable_msi_parsing(struct pci_dev *pdev)
{ struct iproc_pcie *pcie = iproc_data(pdev->bus);
/*
 * The PCI config space is shared with the PAXC root port and the first
 * Ethernet device. So, we need to workaround this by telling the PCI
 * code that the bridge is not an Ethernet device.
 */
static void quirk_paxc_bridge(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		pdev->class = PCI_CLASS_BRIDGE_PCI_NORMAL;

	/*
	 * MPSS is not being set properly (as it is currently 0). This is
	 * because that area of the PCI config space is hard coded to zero, and
	 * is not modifiable by firmware. Set this to 2 (e.g., 512 byte MPS)
	 * so that the MPS can be set to the real max value.
	 */
	pdev->pcie_mpss = 2;
}
/* register the bridge quirk for all known PAXC-based device IDs */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16cd, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x16f0, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd750, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd802, quirk_paxc_bridge);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0xd804, quirk_paxc_bridge);
/* NOTE(review): author email appears truncated by extraction */
MODULE_AUTHOR("Ray Jui ");
MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver");
MODULE_LICENSE("GPL v2");
Messung V0.5
¤ Dauer der Verarbeitung: 0.17 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.