// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 *   Alexander Schmidt
 *   Christoph Raisch
 *   Hannes Hering
 *   Hoang-Nam Nguyen
 *   Jan-Bernd Themann
 *   Stefan Roscher
 *   Thomas Klein
 */
/* list of all detected zpci devices */
static LIST_HEAD(zpci_list);
static DEFINE_SPINLOCK(zpci_list_lock);
static DEFINE_MUTEX(zpci_add_remove_lock);

/* AEN structures that must be preserved over KVM module re-insertion */
union zpci_sic_iib *zpci_aipb;
EXPORT_SYMBOL_GPL(zpci_aipb);
struct airq_iv *zpci_aif_sbv;
EXPORT_SYMBOL_GPL(zpci_aif_sbv);
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   pgprot_t prot)
{
	/*
	 * When PCI MIO instructions are unavailable the "physical" address
	 * encodes a hint for accessing the PCI memory space it represents.
	 * Just pass it unchanged such that ioread/iowrite can decode it.
	 */
	if (!static_branch_unlikely(&have_mio))
		return (void __iomem *)phys_addr;

	/*
	 * NOTE(review): the tail of this function was lost to extraction
	 * damage; reconstructed as the generic page-table based mapping —
	 * confirm against the original source.
	 */
	return generic_ioremap_prot(phys_addr, size, prot);
}
EXPORT_SYMBOL(ioremap_prot);
void iounmap(volatilevoid __iomem *addr)
{ if (static_branch_likely(&have_mio))
generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);
/* Create a virtual mapping cookie for a PCI BAR */
static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
					unsigned long offset, unsigned long max)
{
	struct zpci_dev *zdev = to_zpci(pdev);
	int idx;

	/*
	 * NOTE(review): the body below was truncated by extraction damage
	 * and has been reconstructed from the upstream kernel — verify
	 * against the original source.
	 */
	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	/* Detect overrun */
	WARN_ON(!++zpci_iomap_start[idx].count);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	return (void __iomem *) ZPCI_ADDR(idx) + offset;
}
/*
 * Map a range of a PCI BAR, dispatching on MIO availability.
 * Returns NULL for an invalid BAR index or an empty resource.
 */
void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
			      unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_range);
/* Map a whole PCI BAR (up to @maxlen) starting at offset 0. */
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap);
/*
 * Write-combining variant of pci_iomap_range(). Without MIO the
 * function-handle path is used, which has no WC distinction.
 */
void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
				 unsigned long offset, unsigned long max)
{
	if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
		return NULL;

	if (static_branch_likely(&have_mio))
		return pci_iomap_wc_range_mio(pdev, bar, offset, max);
	else
		return pci_iomap_range_fh(pdev, bar, offset, max);
}
EXPORT_SYMBOL(pci_iomap_wc_range);
/* Write-combining map of a whole PCI BAR (up to @maxlen). */
void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
{
	return pci_iomap_wc_range(dev, bar, 0, maxlen);
}
EXPORT_SYMBOL(pci_iomap_wc);
/*
 * NOTE(review): fragment — the header of the enclosing function is not
 * visible in this chunk (extraction damage). The loop releases the
 * legacy (non-MIO) iomap cookie of every populated standard BAR via
 * pci_iounmap_fh(); presumably part of the pci_iounmap teardown path —
 * confirm against the original source.
 */
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
	len = pci_resource_len(pdev, i); if (!len) continue;
	pci_iounmap_fh(pdev, (void __iomem __force *)
		       pdev->resource[i].start);
}
}
int zpci_setup_bus_resources(struct zpci_dev *zdev)
{ unsignedlong addr, size, flags; struct resource *res; int i, entry;
snprintf(zdev->res_name, sizeof(zdev->res_name), "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);
for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (!zdev->bars[i].size) continue;
entry = zpci_alloc_iomap(zdev); if (entry < 0) return entry;
zdev->bars[i].map_idx = entry;
/* only MMIO is supported */
flags = IORESOURCE_MEM; if (zdev->bars[i].val & 8)
flags |= IORESOURCE_PREFETCH; if (zdev->bars[i].val & 4)
flags |= IORESOURCE_MEM_64;
spin_lock(&zpci_domain_lock); /* * We can always auto allocate domains below ZPCI_NR_DEVICES. * There is either a free domain or we have reached the maximum in * which case we would have bailed earlier.
*/
domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
set_bit(domain, zpci_domain);
spin_unlock(&zpci_domain_lock); return domain;
}
/*
 * Allocate a PCI domain number. With UID checking active a non-zero
 * @domain (the UID) is registered directly; otherwise fall back to
 * automatic allocation.
 */
int zpci_alloc_domain(int domain)
{
	if (zpci_unique_uid) {
		if (domain)
			return __zpci_register_domain(domain);
		pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
		update_uid_checking(false);
	}
	return __zpci_alloc_domain();
}
int zpci_disable_device(struct zpci_dev *zdev)
{
u32 fh = zdev->fh; int cc, rc = 0;
cc = clp_disable_fh(zdev, &fh); if (!cc) {
zpci_update_fh(zdev, fh);
} elseif (cc == CLP_RC_SETPCIFN_ALRDY) {
pr_info("Disabling PCI function %08x had no effect as it was already disabled\n",
zdev->fid); /* Function is already disabled - update handle */
rc = clp_refresh_fh(zdev->fid, &fh); if (!rc) {
zpci_update_fh(zdev, fh);
rc = -EINVAL;
}
} else {
rc = -EIO;
} return rc;
}
EXPORT_SYMBOL_GPL(zpci_disable_device);
/**
 * zpci_hot_reset_device - perform a reset of the given zPCI function
 * @zdev: the slot which should be reset
 *
 * Performs a low level reset of the zPCI function. The reset is low level in
 * the sense that the zPCI function can be reset without detaching it from the
 * common PCI subsystem. The reset may be performed while under control of
 * either DMA or IOMMU APIs in which case the existing DMA/IOMMU translation
 * table is reinstated at the end of the reset.
 *
 * After the reset the functions internal state is reset to an initial state
 * equivalent to its state during boot when first probing a driver.
 * Consequently after reset the PCI function requires re-initialization via the
 * common PCI code including re-enabling IRQs via pci_alloc_irq_vectors()
 * and enabling the function via e.g. pci_enable_device_flags(). The caller
 * must guard against concurrent reset attempts.
 *
 * In most cases this function should not be called directly but through
 * pci_reset_function() or pci_reset_bus() which handle the save/restore and
 * locking - asserted by lockdep.
 *
 * Return: 0 on success and an error value otherwise
 */
int zpci_hot_reset_device(struct zpci_dev *zdev)
{
	int rc;

	lockdep_assert_held(&zdev->state_lock);
	zpci_dbg(3, "rst fid:%x, fh:%x\n", zdev->fid, zdev->fh);
	if (zdev_enabled(zdev)) {
		/* Disables device access, DMAs and IRQs (reset state) */
		rc = zpci_disable_device(zdev);
		/*
		 * Due to a z/VM vs LPAR inconsistency in the error state the
		 * FH may indicate an enabled device but disable says the
		 * device is already disabled don't treat it as an error here.
		 */
		if (rc == -EINVAL)
			rc = 0;
		if (rc)
			return rc;
	}

	rc = zpci_reenable_device(zdev);

	return rc;
}
/**
 * zpci_create_device() - Create a new zpci_dev and add it to the zbus
 * @fid: Function ID of the device to be created
 * @fh: Current Function Handle of the device to be created
 * @state: Initial state after creation either Standby or Configured
 *
 * Allocates a new struct zpci_dev and queries the platform for its details.
 * If successful the device can subsequently be added to the zPCI subsystem
 * using zpci_add_device().
 *
 * Returns: the zdev on success or an error pointer otherwise
 */
struct zpci_dev *zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
{
	struct zpci_dev *zdev;
	int rc;

	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* FID and Function Handle are the static/dynamic identifiers */
	zdev->fid = fid;
	zdev->fh = fh;

	/* Query function properties and update zdev */
	rc = clp_query_pci_fn(zdev);
	if (rc)
		goto error;
	zdev->state = state;

	/*
	 * NOTE(review): the tail of this function (including the error
	 * label targeted by the goto above) was truncated by extraction
	 * damage; reconstructed from upstream — verify the lock
	 * initialization against the original source.
	 */
	mutex_init(&zdev->state_lock);
	mutex_init(&zdev->fmb_lock);
	mutex_init(&zdev->kzdev_lock);

	return zdev;

error:
	zpci_dbg(0, "crt fid:%x, rc:%d\n", fid, rc);
	kfree(zdev);
	return ERR_PTR(rc);
}
/** * zpci_add_device() - Add a previously created zPCI device to the zPCI subsystem * @zdev: The zPCI device to be added * * A struct zpci_dev is added to the zPCI subsystem and to a virtual PCI bus creating * a new one as necessary. A hotplug slot is created and events start to be handled. * If successful from this point on zpci_zdev_get() and zpci_zdev_put() must be used. * If adding the struct zpci_dev fails the device was not added and should be freed. * * Return: 0 on success, or an error code otherwise
*/ int zpci_add_device(struct zpci_dev *zdev)
{ int rc;
/*
 * A zPCI function counts as configured unless it is in the Reserved or
 * Standby state.
 */
bool zpci_is_device_configured(struct zpci_dev *zdev)
{
	enum zpci_state state = zdev->state;

	return state != ZPCI_FN_STATE_RESERVED &&
	       state != ZPCI_FN_STATE_STANDBY;
}
/**
 * zpci_scan_configured_device() - Scan a freshly configured zpci_dev
 * @zdev: The zpci_dev to be configured
 * @fh: The general function handle supplied by the platform
 *
 * Given a device in the configuration state Configured, enables, scans and
 * adds it to the common code PCI subsystem if possible. If any failure occurs,
 * the zpci_dev is left disabled.
 *
 * Return: 0 on success, or an error code otherwise
 */
int zpci_scan_configured_device(struct zpci_dev *zdev, u32 fh)
{
	zpci_update_fh(zdev, fh);
	return zpci_bus_scan_device(zdev);
}
/** * zpci_deconfigure_device() - Deconfigure a zpci_dev * @zdev: The zpci_dev to configure * * Deconfigure a zPCI function that is currently configured and possibly known * to the common code PCI subsystem. * If any failure occurs the device is left as is. * * Return: 0 on success, or an error code otherwise
*/ int zpci_deconfigure_device(struct zpci_dev *zdev)
{ int rc;
lockdep_assert_held(&zdev->state_lock); if (zdev->state != ZPCI_FN_STATE_CONFIGURED) return 0;
if (zdev->zbus->bus)
zpci_bus_remove_device(zdev, false);
if (zdev_enabled(zdev)) {
rc = zpci_disable_device(zdev); if (rc) return rc;
}
/**
 * zpci_device_reserved() - Mark device as reserved
 * @zdev: the zpci_dev that was reserved
 *
 * Handle the case that a given zPCI function was reserved by another system.
 */
void zpci_device_reserved(struct zpci_dev *zdev)
{
	lockdep_assert_held(&zdev->state_lock);
	/* We may declare the device reserved multiple times */
	if (zdev->state == ZPCI_FN_STATE_RESERVED)
		return;
	zdev->state = ZPCI_FN_STATE_RESERVED;
	zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
	/*
	 * The underlying device is gone. Allow the zdev to be freed
	 * as soon as all other references are gone by accounting for
	 * the removal as a dropped reference.
	 */
	zpci_zdev_put(zdev);
}
lockdep_assert_held(&zpci_add_remove_lock);
WARN_ON(zdev->state != ZPCI_FN_STATE_RESERVED); /* * We already hold zpci_list_lock thanks to kref_put_lock(). * This makes sure no new reference can be taken from the list.
*/
list_del(&zdev->entry);
spin_unlock(&zpci_list_lock);
if (zdev->has_hp_slot)
zpci_exit_slot(zdev);
if (zdev->has_resources)
zpci_cleanup_bus_resources(zdev);
/**
 * zpci_clear_error_state() - Clears the zPCI error state of the device
 * @zdev: The zdev for which the zPCI error state should be reset
 *
 * Clear the zPCI error state of the device. If clearing the zPCI error state
 * fails the device is left in the error state. In this case it may make sense
 * to call zpci_io_perm_failure() on the associated pdev if it exists.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_clear_error_state(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_ERROR);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "ces fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	return 0;
}
/**
 * zpci_reset_load_store_blocked() - Re-enables L/S from error state
 * @zdev: The zdev for which to unblock load/store access
 *
 * Re-enables load/store access for a PCI function in the error state while
 * keeping DMA blocked. In this state drivers can poke MMIO space to determine
 * if error recovery is possible while catching any rogue DMA access from the
 * device.
 *
 * Returns: 0 on success, -EIO otherwise
 */
int zpci_reset_load_store_blocked(struct zpci_dev *zdev)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_RESET_BLOCK);
	struct zpci_fib fib = {0};
	u8 status;
	int cc;

	cc = zpci_mod_fc(req, &fib, &status);
	if (cc) {
		zpci_dbg(3, "rls fid:%x, cc:%d, status:%x\n", zdev->fid, cc, status);
		return -EIO;
	}

	/*
	 * NOTE(review): the success return and closing brace were lost to
	 * extraction damage; restored here (mirrors zpci_clear_error_state).
	 */
	return 0;
}
/*
 * NOTE(review): fragment — the header of this comparator (presumably a
 * list_sort() callback receiving the two zpci devices as za/zb) is
 * missing from this chunk; confirm against the original source.
 *
 * PCI functions without RID available maintain original order
 * between themselves but sort before those with RID.
 */
if (za->rid == zb->rid) return za->rid_available > zb->rid_available;
/*
 * PCI functions with RID sort by RID ascending.
 */
return za->rid > zb->rid;
}
/*
 * NOTE(review): the following trailing text is unrelated web-page
 * boilerplate (a German disclaimer) that leaked into the file during
 * extraction; it is not part of the source and should be removed.
 * Preserved, commented out, for reference:
 *
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *  sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 *  noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 *  zugesichert. Bemerkung: Die farbliche Syntaxdarstellung und die
 *  Messung sind noch experimentell."
 */