/* * Some device drivers need know if PCI is initiated. * Basically, we think PCI is not initiated when there * is no device to be found on the pci_bus_type.
*/ int no_pci_devices(void)
{ struct device *dev; int no_devices;
/* * Get the lowest of them to find the decode size, and from that * the extent.
*/
size = size & ~(size-1);
/* * base == maxbase can be valid only if the BAR has already been * programmed with all 1s.
*/ if (base == maxbase && ((base | (size - 1)) & mask) != mask) return 0;
if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
flags |= IORESOURCE_IO; return flags;
}
flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
flags |= IORESOURCE_MEM; if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
flags |= IORESOURCE_PREFETCH;
mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK; switch (mem_type) { case PCI_BASE_ADDRESS_MEM_TYPE_32: break; case PCI_BASE_ADDRESS_MEM_TYPE_1M: /* 1M mem BAR treated as 32-bit BAR */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64:
flags |= IORESOURCE_MEM_64; break; default: /* mem unknown type treated as 32-bit BAR */ break;
} return flags;
}
/** * __pci_size_bars - Read the raw BAR mask for a range of PCI BARs * @dev: the PCI device * @count: number of BARs to size * @pos: starting config space position * @sizes: array to store mask values * @rom: indicate whether to use ROM mask, which avoids enabling ROM BARs * * Provided @sizes array must be sufficiently sized to store results for * @count u32 BARs. Caller is responsible for disabling decode to specified * BAR range around calling this function. This function is intended to avoid * disabling decode around sizing each BAR individually, which can result in * non-trivial overhead in virtualized environments with very large PCI BARs.
*/ staticvoid __pci_size_bars(struct pci_dev *dev, int count, unsignedint pos, u32 *sizes, bool rom)
{
u32 orig, mask = rom ? PCI_ROM_ADDRESS_MASK : ~0; int i;
for (i = 0; i < count; i++, pos += 4, sizes++) {
pci_read_config_dword(dev, pos, &orig);
pci_write_config_dword(dev, pos, mask);
pci_read_config_dword(dev, pos, sizes);
pci_write_config_dword(dev, pos, orig);
}
}
/** * __pci_read_base - Read a PCI BAR * @dev: the PCI device * @type: type of the BAR * @res: resource buffer to be filled in * @pos: BAR position in the config space * @sizes: array of one or more pre-read BAR masks * * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
*/ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsignedint pos, u32 *sizes)
{
u32 l = 0, sz;
u64 l64, sz64, mask64; struct pci_bus_region region, inverted_region; constchar *res_name = pci_resource_name(dev, res - dev->resource);
/* * All bits set in sz means the device isn't working properly. * If the BAR isn't implemented, all bits must be 0. If it's a * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit * 1 must be clear.
*/ if (PCI_POSSIBLE_ERROR(sz))
sz = 0;
/* * I don't know how l can have all bits set. Copied from old code. * Maybe it fixes a bug on some ancient platform.
*/ if (PCI_POSSIBLE_ERROR(l))
l = 0;
/* * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is * the corresponding resource address (the physical address used by * the CPU. Converting that resource address back to a bus address * should yield the original BAR value: * * resource_to_bus(bus_to_resource(A)) == A * * If it doesn't, CPU accesses to "bus_to_resource(A)" will not * be claimed by the device.
*/ if (inverted_region.start != region.start) {
res->flags |= IORESOURCE_UNSET;
res->start = 0;
res->end = region.end - region.start;
pci_info(dev, "%s: initial BAR value %#010llx invalid\n",
res_name, (unsignedlonglong)region.start);
}
/* * Some bridges set the base > limit by default, and some * (broken) BIOSes do not initialize them. If we find * this, just assume they are not being used.
*/ if (mem_base_hi <= mem_limit_hi) {
base64 |= (u64) mem_base_hi << 32;
limit64 |= (u64) mem_limit_hi << 32;
}
}
base = (pci_bus_addr_t) base64;
limit = (pci_bus_addr_t) limit64;
/* * DECchip 21050 pass 2 errata: the bridge may miss an address * disconnect boundary by one PCI data phase. Workaround: do not * use prefetching on this device.
*/ if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) return;
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); if (!pmem) {
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
0xffe0fff0);
pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
} if (!pmem) return;
bridge->pref_window = 1;
if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
/* * Bridge claims to have a 64-bit prefetchable memory * window; verify that the upper bits are actually * writable.
*/
pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem);
pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
0xffffffff);
pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); if (tmp)
bridge->pref_64_window = 1;
}
/* * We assume we can manage these PCIe features. Some systems may * reserve these for use by the platform itself, e.g., an ACPI BIOS * may implement its own AER handling and use _OSC to prevent the * OS from interfering.
*/
bridge->native_aer = 1;
bridge->native_pcie_hotplug = 1;
bridge->native_shpc_hotplug = 1;
bridge->native_pme = 1;
bridge->native_ltr = 1;
bridge->native_dpc = 1;
bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET;
bridge->native_cxl_error = 1;
/* If the host bridge driver sets a MSI domain of the bridge, use it */
d = dev_get_msi_domain(bus->bridge);
/* * Any firmware interface that can resolve the msi_domain * should be called from here.
*/ if (!d)
d = pci_host_bridge_of_msi_domain(bus); if (!d)
d = pci_host_bridge_acpi_msi_domain(bus);
/* * If no IRQ domain was found via the OF tree, try looking it up * directly through the fwnode_handle.
*/ if (!d) { struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);
if (fwnode)
d = irq_find_matching_fwnode(fwnode,
DOMAIN_BUS_PCI_MSI);
}
/* * The bus can be a root bus, a subordinate bus, or a virtual bus * created by an SR-IOV device. Walk up to the first bridge device * found or derive the domain from the host bridge.
*/ for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) { if (b->self)
d = dev_get_msi_domain(&b->self->dev);
}
if (!d)
d = pci_host_bridge_msi_domain(b);
dev_set_msi_domain(&bus->dev, d);
}
staticbool pci_preserve_config(struct pci_host_bridge *host_bridge)
{ if (pci_acpi_preserve_config(host_bridge)) returntrue;
if (host_bridge->dev.parent && host_bridge->dev.parent->of_node) return of_pci_preserve_config(host_bridge->dev.parent->of_node);
b = pci_find_bus(pci_domain_nr(bus), bridge->busnr); if (b) { /* Ignore it if we already got here via a different bridge */
dev_dbg(&b->dev, "bus already known\n");
err = -EEXIST; goto free;
}
/* Add initial resources to the bus */
resource_list_for_each_entry_safe(window, n, &resources) {
offset = window->offset;
res = window->res; if (!res->flags && !res->start && !res->end) {
release_resource(res);
resource_list_destroy_entry(window); continue;
}
list_move_tail(&window->node, &bridge->windows);
if (res->flags & IORESOURCE_BUS)
pci_bus_insert_busn_res(bus, bus->number, res->end); else
pci_bus_add_resource(bus, res);
if (offset) { if (resource_type(res) == IORESOURCE_IO)
fmt = " (bus address [%#06llx-%#06llx])"; else
fmt = " (bus address [%#010llx-%#010llx])";
staticbool pci_bridge_child_ext_cfg_accessible(struct pci_dev *bridge)
{ int pos;
u32 status;
/* * If extended config space isn't accessible on a bridge's primary * bus, we certainly can't access it on the secondary bus.
*/ if (bridge->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG) returnfalse;
/* * PCIe Root Ports and switch ports are PCIe on both sides, so if * extended config space is accessible on the primary, it's also * accessible on the secondary.
*/ if (pci_is_pcie(bridge) &&
(pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT ||
pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM ||
pci_pcie_type(bridge) == PCI_EXP_TYPE_DOWNSTREAM)) returntrue;
/* * For the other bridge types: * - PCI-to-PCI bridges * - PCIe-to-PCI/PCI-X forward bridges * - PCI/PCI-X-to-PCIe reverse bridges * extended config space on the secondary side is only accessible * if the bridge supports PCI-X Mode 2.
*/
pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX); if (!pos) returnfalse;
/* * Initialize some portions of the bus device, but don't register * it now as the parent is not properly set up yet.
*/
child->dev.class = &pcibus_class;
dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
/* Set up the primary, secondary and subordinate bus numbers */
child->number = child->busn_res.start = busnr;
child->primary = parent->busn_res.start;
child->busn_res.end = 0xff;
if (!bridge) {
child->dev.parent = parent->bridge; goto add_dev;
}
/* * Check whether extended config space is accessible on the child * bus. Note that we currently assume it is always accessible on * the root bus.
*/ if (!pci_bridge_child_ext_cfg_accessible(bridge)) {
child->bus_flags |= PCI_BUS_FLAGS_NO_EXTCFG;
pci_info(child, "extended config space not accessible\n");
}
/* Set up default resource pointers and names */ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
child->resource[i]->name = child->name;
}
bridge->subordinate = child;
add_dev:
pci_set_bus_msi_domain(child);
ret = device_register(&child->dev); if (WARN_ON(ret < 0)) {
put_device(&child->dev); return NULL;
}
pcibios_add_bus(child);
if (child->ops->add_bus) {
ret = child->ops->add_bus(child); if (WARN_ON(ret < 0))
dev_err(&child->dev, "failed to add bus: %d\n", ret);
}
/* Create legacy_io and legacy_mem files for this bus */
pci_create_legacy_files(child);
staticunsignedint pci_scan_child_bus_extend(struct pci_bus *bus, unsignedint available_buses); /** * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus * numbers from EA capability. * @dev: Bridge * @sec: updated with secondary bus number from EA * @sub: updated with subordinate bus number from EA * * If @dev is a bridge with EA capability that specifies valid secondary * and subordinate bus numbers, return true with the bus numbers in @sec * and @sub. Otherwise return false.
*/ staticbool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub)
{ int ea, offset;
u32 dw;
u8 ea_sec, ea_sub;
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) returnfalse;
/* find PCI EA capability in list */
ea = pci_find_capability(dev, PCI_CAP_ID_EA); if (!ea) returnfalse;
/* * pci_scan_bridge_extend() - Scan buses behind a bridge * @bus: Parent bus the bridge is on * @dev: Bridge itself * @max: Starting subordinate number of buses behind this bridge * @available_buses: Total number of buses available for this bridge and * the devices below. After the minimal bus space has * been allocated the remaining buses will be * distributed equally between hotplug-capable bridges. * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges * that need to be reconfigured. * * If it's a bridge, configure it and scan the bus behind it. * For CardBus bridges, we don't scan behind as the devices will * be handled by the bridge driver itself. * * We need to process bridges in two passes -- first we scan those * already configured by the BIOS and after we are done with all of * them, we proceed to assigning numbers to the remaining buses in * order to avoid overlaps between old and new bus numbers. * * Return: New subordinate number covering all buses behind this bridge.
*/ staticint pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, int max, unsignedint available_buses, int pass)
{ struct pci_bus *child; int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
u32 buses, i, j = 0;
u16 bctl;
u8 primary, secondary, subordinate; int broken = 0; bool fixed_buses;
u8 fixed_sec, fixed_sub; int next_busnr;
/* * Make sure the bridge is powered on to be able to access config * space of devices below it.
*/
pm_runtime_get_sync(&dev->dev);
if (!primary && (primary != bus->number) && secondary && subordinate) {
pci_warn(dev, "Primary bus is hard wired to 0\n");
primary = bus->number;
}
/* Check if setup is sensible at all */ if (!pass &&
(primary != bus->number || secondary <= bus->number ||
secondary > subordinate)) {
pci_info(dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
secondary, subordinate);
broken = 1;
}
/* * Disable Master-Abort Mode during probing to avoid reporting of * bus errors in some architectures.
*/
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
/* * Bus already configured by firmware, process it in the * first pass and just note the configuration.
*/ if (pass) goto out;
/* * The bus might already exist for two reasons: Either we * are rescanning the bus or the bus is reachable through * more than one bridge. The second case can happen with * the i450NX chipset.
*/
child = pci_find_bus(pci_domain_nr(bus), secondary); if (!child) {
child = pci_add_new_bus(bus, dev, secondary); if (!child) goto out;
child->primary = primary;
pci_bus_insert_busn_res(child, secondary, subordinate);
child->bridge_ctl = bctl;
}
buses = subordinate - secondary;
cmax = pci_scan_child_bus_extend(child, buses); if (cmax > subordinate)
pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n",
subordinate, cmax);
/* Subordinate should equal child->busn_res.end */ if (subordinate > max)
max = subordinate;
} else {
/* * We need to assign a number to this bus which we always * do in the second pass.
*/ if (!pass) { if (pcibios_assign_all_busses() || broken || is_cardbus)
/* * Temporarily disable forwarding of the * configuration cycles on all bridges in * this bus segment to avoid possible * conflicts in the second pass between two * bridges programmed with overlapping bus * ranges.
*/
pci_write_config_dword(dev, PCI_PRIMARY_BUS,
buses & ~0xffffff); goto out;
}
/* Read bus numbers from EA Capability (if present) */
fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub); if (fixed_buses)
next_busnr = fixed_sec; else
next_busnr = max + 1;
/* * Prevent assigning a bus number that already exists. * This can happen when a bridge is hot-plugged, so in this * case we only re-scan this bus.
*/
child = pci_find_bus(pci_domain_nr(bus), next_busnr); if (!child) {
child = pci_add_new_bus(bus, dev, next_busnr); if (!child) goto out;
pci_bus_insert_busn_res(child, next_busnr,
bus->busn_res.end);
}
max++; if (available_buses)
available_buses--;
/* * yenta.c forces a secondary latency timer of 176. * Copy that behaviour here.
*/ if (is_cardbus) {
buses &= ~0xff000000;
buses |= CARDBUS_LATENCY_TIMER << 24;
}
/* We need to blast all three values with a single write */
pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
if (!is_cardbus) {
child->bridge_ctl = bctl;
max = pci_scan_child_bus_extend(child, available_buses);
} else {
/* * For CardBus bridges, we leave 4 bus numbers as * cards with a PCI-to-PCI bridge can be inserted * later.
*/ for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) { struct pci_bus *parent = bus; if (pci_find_bus(pci_domain_nr(bus),
max+i+1)) break; while (parent->parent) { if ((!pcibios_assign_all_busses()) &&
(parent->busn_res.end > max) &&
(parent->busn_res.end <= max+i)) {
j = 1;
}
parent = parent->parent;
} if (j) {
/* * Often, there are two CardBus * bridges -- try to leave one * valid bus number for each one.
*/
i /= 2; break;
}
}
max += i;
}
/* * Set subordinate bus number to its real value. * If fixed subordinate bus number exists from EA * capability then use it.
*/ if (fixed_buses)
max = fixed_sub;
pci_bus_update_busn_res_end(child, max);
pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
}
/* Check that all devices are accessible */ while (bus->parent) { if ((child->busn_res.end > bus->busn_res.end) ||
(child->number > bus->busn_res.end) ||
(child->number < bus->number) ||
(child->busn_res.end < bus->number)) {
dev_info(&dev->dev, "devices behind bridge are unusable because %pR cannot be assigned for them\n",
&child->busn_res); break;
}
bus = bus->parent;
}
out: /* Clear errors in the Secondary Status Register */
pci_write_config_word(dev, PCI_SEC_STATUS, 0xffff);
/*
 * pci_scan_bridge() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *        that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 *
 * Return: New subordinate number covering all buses behind this bridge.
 *
 * Thin wrapper around pci_scan_bridge_extend() with available_buses
 * hard-wired to 0, i.e. no extra bus numbers are reserved for
 * distribution to hotplug-capable bridges below this one.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{ return pci_scan_bridge_extend(bus, dev, max, 0, pass);
}
EXPORT_SYMBOL(pci_scan_bridge);
/* * Read interrupt line and base address registers. * The architecture-dependent code can tweak these, of course.
*/ staticvoid pci_read_irq(struct pci_dev *dev)
{ unsignedchar irq;
/* VFs are not allowed to use INTx, so skip the config reads */ if (dev->is_virtfn) {
dev->pin = 0;
dev->irq = 0; return;
}
pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, ®32); if (reg32 & PCI_EXP_LNKCAP_DLLLARC)
pdev->link_active_reporting = 1;
parent = pci_upstream_bridge(pdev); if (!parent) return;
/* * Some systems do not identify their upstream/downstream ports * correctly so detect impossible configurations here and correct * the port type accordingly.
*/ if (type == PCI_EXP_TYPE_DOWNSTREAM) { /* * If pdev claims to be downstream port but the parent * device is also downstream port assume pdev is actually * upstream port.
*/ if (pcie_downstream_port(parent)) {
pci_info(pdev, "claims to be downstream port but is acting as upstream port, correcting type\n");
pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE;
pdev->pcie_flags_reg |= PCI_EXP_TYPE_UPSTREAM;
}
} elseif (type == PCI_EXP_TYPE_UPSTREAM) { /* * If pdev claims to be upstream port but the parent * device is also upstream port assume pdev is actually * downstream port.
*/ if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) {
pci_info(pdev, "claims to be upstream port but is acting as downstream port, correcting type\n");
pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE;
pdev->pcie_flags_reg |= PCI_EXP_TYPE_DOWNSTREAM;
}
}
}
/* Is the device part of a Thunderbolt controller? */
vsec = pci_find_vsec_capability(dev, PCI_VENDOR_ID_INTEL, PCI_VSEC_ID_INTEL_TBT); if (vsec)
dev->is_thunderbolt = 1;
}
if (!parent) return; /* * If the upstream bridge is untrusted we treat this device as * untrusted as well.
*/ if (parent->untrusted) {
dev->untrusted = true; return;
}
if (arch_pci_dev_is_removable(dev)) {
pci_dbg(dev, "marking as untrusted\n");
dev->untrusted = true;
}
}
if (!parent) return; /* * We (only) consider everything tunneled below an external_facing * device to be removable by the user. We're mainly concerned with * consumer platforms with user accessible thunderbolt ports that are * vulnerable to DMA attacks, and we expect those ports to be marked by * the firmware as external_facing. Devices in traditional hotplug * slots can technically be removed, but the expectation is that unless * the port is marked with external_facing, such devices are less * accessible to user / may not be removed by end user, and thus not * exposed as "removable" to userspace.
*/ if (dev_is_removable(&parent->dev)) {
dev_set_removable(&dev->dev, DEVICE_REMOVABLE); return;
}
if (arch_pci_dev_is_removable(dev)) {
pci_dbg(dev, "marking as removable\n");
dev_set_removable(&dev->dev, DEVICE_REMOVABLE);
}
}
/** * pci_ext_cfg_is_aliased - Is ext config space just an alias of std config? * @dev: PCI device * * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that * when forwarding a type1 configuration request the bridge must check that * the extended register address field is zero. The bridge is not permitted * to forward the transactions and must handle it as an Unsupported Request. * Some bridges do not follow this rule and simply drop the extended register * bits, resulting in the standard config space being aliased, every 256 * bytes across the entire configuration space. Test for this condition by * comparing the first dword of each potential alias to the vendor/device ID. * Known offenders: * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03) * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
*/ staticbool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{ #ifdef CONFIG_PCI_QUIRKS int pos, ret;
u32 header, tmp;
for (pos = PCI_CFG_SPACE_SIZE;
pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
ret = pci_read_config_dword(dev, pos, &tmp); if ((ret != PCIBIOS_SUCCESSFUL) || (header != tmp)) returnfalse;
}
returntrue; #else returnfalse; #endif
}
/**
 * pci_cfg_space_size_ext - Get the configuration space size of the PCI device
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 dw;

	/*
	 * Probe the first dword of extended config space; fall back to the
	 * standard 256-byte space on a read failure, on an all-ones (error)
	 * value, or when extended space merely aliases standard space.
	 */
	if (pci_read_config_dword(dev, PCI_CFG_SPACE_SIZE, &dw) != PCIBIOS_SUCCESSFUL ||
	    PCI_POSSIBLE_ERROR(dw) ||
	    pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}
int pci_cfg_space_size(struct pci_dev *dev)
{ int pos;
u32 status;
u16 class;
#ifdef CONFIG_PCI_IOV /* * Per the SR-IOV specification (rev 1.1, sec 3.5), VFs are required to * implement a PCIe capability and therefore must implement extended * config space. We can skip the NO_EXTCFG test below and the * reachability/aliasing test in pci_cfg_space_size_ext() by virtue of * the fact that the SR-IOV capability on the PF resides in extended * config space and must be accessible and non-aliased to have enabled * support for this VF. This is a micro performance optimization for * systems supporting many VFs.
*/ if (dev->is_virtfn) return PCI_CFG_SPACE_EXP_SIZE; #endif
if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG) return PCI_CFG_SPACE_SIZE;
class = dev->class >> 8; if (class == PCI_CLASS_BRIDGE_HOST) return pci_cfg_space_size_ext(dev);
if (pci_is_pcie(dev)) return pci_cfg_space_size_ext(dev);
pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!pos) return PCI_CFG_SPACE_SIZE;
/* * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI * r2.3, so strictly speaking, a device is not *broken* if it's not * writable. But we'll live with the misnomer for now.
*/ if (new != toggle) return 1; return 0;
}
if (pci_is_pcie(dev)) {
type = pci_pcie_type(dev); if (type < ARRAY_SIZE(str)) return str[type];
return"PCIe unknown";
}
switch (dev->hdr_type) { case PCI_HEADER_TYPE_NORMAL: return"conventional PCI endpoint"; case PCI_HEADER_TYPE_BRIDGE: return"conventional PCI bridge"; case PCI_HEADER_TYPE_CARDBUS: return"CardBus bridge"; default: return"conventional PCI";
}
}
/** * pci_setup_device - Fill in class and map information of a device * @dev: the device structure to fill * * Initialize the device structure with information about the device's * vendor,class,memory and IO-space addresses, IRQ lines etc. * Called at initialisation of the PCI subsystem and by CardBus services. * Returns 0 on success and negative if unknown type of device (not normal, * bridge or CardBus).
*/ int pci_setup_device(struct pci_dev *dev)
{
u32 class;
u16 cmd;
u8 hdr_type; int err, pos = 0; struct pci_bus_region region; struct resource *res;
err = pci_set_of_node(dev); if (err) return err;
pci_set_acpi_fwnode(dev);
pci_dev_assign_slot(dev);
/* * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) * set this higher, assuming the system even supports it.
*/
dev->dma_mask = 0xffffffff;
/* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ if (dev->is_virtfn) return;
/* * For Root Complex Integrated Endpoints, program the maximum * supported value unless limited by the PCIE_BUS_PEER2PEER case.
*/ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { if (pcie_bus_config == PCIE_BUS_PEER2PEER)
mps = 128; else
mps = 128 << dev->pcie_mpss;
rc = pcie_set_mps(dev, mps); if (rc) {
pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
mps);
} return;
}
if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
pci_warn(dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
mps, pci_name(bridge), p_mps); return;
}
/* * Fancier MPS configuration is done later by * pcie_bus_configure_settings()
*/ if (pcie_bus_config != PCIE_BUS_DEFAULT) return;
mpss = 128 << dev->pcie_mpss; if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
pcie_set_mps(bridge, mpss);
pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
mpss, p_mps, 128 << bridge->pcie_mpss);
p_mps = pcie_get_mps(bridge);
}
rc = pcie_set_mps(dev, p_mps); if (rc) {
pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
p_mps); return;
}
pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n",
p_mps, mps, mpss);
}
int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
{ struct pci_host_bridge *host;
u32 cap;
u16 ctl; int ret;
if (!pci_is_pcie(dev)) return 0;
ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); if (ret) return 0;
if (!(cap & PCI_EXP_DEVCAP_EXT_TAG)) return 0;
ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); if (ret) return 0;
host = pci_find_host_bridge(dev->bus); if (!host) return 0;
/* * If some device in the hierarchy doesn't handle Extended Tags * correctly, make sure they're disabled.
*/ if (host->no_ext_tags) { if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
pci_info(dev, "disabling Extended Tags\n");
pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_EXT_TAG);
} return 0;
}
/* PCI_EXP_DEVCTL_RELAX_EN is RsvdP in VFs */ if (dev->is_virtfn) return;
if (!pcie_relaxed_ordering_enabled(dev)) return;
/* * For now, we only deal with Relaxed Ordering issues with Root * Ports. Peer-to-Peer DMA is another can of worms.
*/
root = pcie_find_root_port(dev); if (!root) return;
if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_RELAX_EN);
pci_info(dev, "Relaxed Ordering disabled because the Root Port didn't support it\n");
}
}
/* * A bridge will not forward ERR_ messages coming from an * endpoint unless SERR# forwarding is enabled.
*/
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &control); if (!(control & PCI_BRIDGE_CTL_SERR)) {
control |= PCI_BRIDGE_CTL_SERR;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, control);
}
}
}
/** * pci_release_dev - Free a PCI device structure when all users of it are * finished * @dev: device that's been disconnected * * Will be called only by the device core when all users of this PCI device are * done.
*/ staticvoid pci_release_dev(struct device *dev)
{ struct pci_dev *pci_dev;
staticbool pci_bus_wait_rrs(struct pci_bus *bus, int devfn, u32 *l, int timeout)
{ int delay = 1;
if (!pci_bus_rrs_vendor_id(*l)) returntrue; /* not a Configuration RRS completion */
if (!timeout) returnfalse; /* RRS, but caller doesn't want to wait */
/* * We got the reserved Vendor ID that indicates a completion with * Configuration Request Retry Status (RRS). Retry until we get a * valid Vendor ID or we time out.
*/ while (pci_bus_rrs_vendor_id(*l)) { if (delay > timeout) {
pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
pci_domain_nr(bus), bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Diese beiden folgenden Angebotsgruppen bietet das Unternehmen0.25Angebot
Wie Sie bei der Firma Beratungs- und Dienstleistungen beauftragen können
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.