staticbool eeh_edev_actionable(struct eeh_dev *edev)
{ if (!edev->pdev) returnfalse; if (edev->pdev->error_state == pci_channel_io_perm_failure) returnfalse; if (eeh_dev_removed(edev)) returnfalse; if (eeh_pe_passed(edev->pe)) returnfalse;
returntrue;
}
/** * eeh_pcid_get - Get the PCI device driver * @pdev: PCI device * * The function is used to retrieve the PCI device driver for * the indicated PCI device. Besides, we will increase the reference * of the PCI device driver to prevent that being unloaded on * the fly. Otherwise, kernel crash would be seen.
*/ staticinlinestruct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{ if (!pdev || !pdev->dev.driver) return NULL;
if (!try_module_get(pdev->dev.driver->owner)) return NULL;
return to_pci_driver(pdev->dev.driver);
}
/**
 * eeh_pcid_put - Dereference on the PCI device driver
 * @pdev: PCI device
 *
 * Drop the module reference taken by eeh_pcid_get() on the driver
 * of the indicated PCI device.
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->dev.driver)
		return;

	module_put(pdev->dev.driver->owner);
}
/** * eeh_disable_irq - Disable interrupt for the recovering device * @dev: PCI device * * This routine must be called when reporting temporary or permanent * error to the particular PCI device to disable interrupt of that * device. If the device has enabled MSI or MSI-X interrupt, we needn't * do real work because EEH should freeze DMA transfers for those PCI * devices encountering EEH errors, which includes MSI or MSI-X.
*/ staticvoid eeh_disable_irq(struct eeh_dev *edev)
{ /* Don't disable MSI and MSI-X interrupts. They are * effectively disabled by the DMA Stopped state * when an EEH error occurs.
*/ if (edev->pdev->msi_enabled || edev->pdev->msix_enabled) return;
/** * eeh_enable_irq - Enable interrupt for the recovering device * @dev: PCI device * * This routine must be called to enable interrupt while failed * device could be resumed.
*/ staticvoid eeh_enable_irq(struct eeh_dev *edev)
{ if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
edev->mode &= ~EEH_DEV_IRQ_DISABLED; /* * FIXME !!!!! * * This is just ass backwards. This maze has * unbalanced irq_enable/disable calls. So instead of * finding the root cause it works around the warning * in the irq_enable code by conditionally calling * into it. * * That's just wrong.The warning in the core code is * there to tell people to fix their asymmetries in * their own code, not by abusing the core information * to avoid it. * * I so wish that the assymetry would be the other way * round and a few more irq_disable calls render that * shit unusable forever. * * tglx
*/ if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
enable_irq(edev->pdev->irq);
}
}
/*
 * NOTE(review): orphaned fragment — these statements reference `edev' and
 * assign `pdev', but the enclosing function header (likely the save-state
 * traversal callback in the upstream file) is missing from this chunk.
 * The header must be restored before this file can build.
 */
/* * We cannot access the config space on some adapters. * Otherwise, it will cause fenced PHB. We don't save * the content in their config space and will restore * from the initial config space saved when the EEH * device is created.
*/ if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) return;
pdev = eeh_dev_to_pci_dev(edev); if (!pdev) return;
/** * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled * @edev: eeh device * @driver: device's PCI driver * * Tells each device driver that IO ports, MMIO and config space I/O * are now enabled.
*/ staticenum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev, struct pci_dev *pdev, struct pci_driver *driver)
{ if (!driver->err_handler->mmio_enabled) return PCI_ERS_RESULT_NONE;
eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name); return driver->err_handler->mmio_enabled(pdev);
}
/** * eeh_report_reset - Tell device that slot has been reset * @edev: eeh device * @driver: device's PCI driver * * This routine must be called while EEH tries to reset particular * PCI device so that the associated PCI device driver could take * some actions, usually to save data the driver needs so that the * driver can work again while the device is recovered.
*/ staticenum pci_ers_result eeh_report_reset(struct eeh_dev *edev, struct pci_dev *pdev, struct pci_driver *driver)
{ if (!driver->err_handler->slot_reset || !edev->in_error) return PCI_ERS_RESULT_NONE;
eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name); return driver->err_handler->slot_reset(pdev);
}
/*
 * NOTE(review): orphaned fragment — this is the tail of a restore-state
 * function (it restores saved config space and drops the rescan lock),
 * but its header and opening statements are missing from this chunk.
 * Restore the enclosing function from the upstream file before building.
 */
/* * The content in the config space isn't saved because * the blocked config space on some adapters. We have * to restore the initial saved config space when the * EEH device is created.
*/ if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) { if (list_is_last(&edev->entry, &edev->pe->edevs))
eeh_pe_restore_bars(edev->pe);
pci_unlock_rescan_remove(); return;
}
pdev = eeh_dev_to_pci_dev(edev); if (!pdev) {
pci_unlock_rescan_remove(); return;
}
pci_restore_state(pdev);
pci_unlock_rescan_remove();
}
/** * eeh_report_resume - Tell device to resume normal operations * @edev: eeh device * @driver: device's PCI driver * * This routine must be called to notify the device driver that it * could resume so that the device driver can do some initialization * to make the recovered device work again.
*/ staticenum pci_ers_result eeh_report_resume(struct eeh_dev *edev, struct pci_dev *pdev, struct pci_driver *driver)
{ if (!driver->err_handler->resume || !edev->in_error) return PCI_ERS_RESULT_NONE;
/** * eeh_report_failure - Tell device driver that device is dead. * @edev: eeh device * @driver: device's PCI driver * * This informs the device driver that the device is permanently * dead, and that no further recovery attempts will be made on it.
*/ staticenum pci_ers_result eeh_report_failure(struct eeh_dev *edev, struct pci_dev *pdev, struct pci_driver *driver)
{ enum pci_ers_result rc;
if (!driver->err_handler->error_detected) return PCI_ERS_RESULT_NONE;
/* * Actually, we should remove the PCI bridges as well. * However, that's lots of complexity to do that, * particularly some of devices under the bridge might * support EEH. So we just care about PCI devices for * simplicity here.
*/ if (!eeh_edev_actionable(edev) ||
(dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) return;
if (rmv_data) {
driver = eeh_pcid_get(dev); if (driver) { if (driver->err_handler &&
driver->err_handler->error_detected &&
driver->err_handler->slot_reset) {
eeh_pcid_put(dev); return;
}
eeh_pcid_put(dev);
}
}
/* Remove it from PCI subsystem */
pr_info("EEH: Removing %s without EEH sensitive driver\n",
pci_name(dev));
edev->mode |= EEH_DEV_DISCONNECTED; if (rmv_data)
rmv_data->removed_dev_count++;
/*
 * Explicitly clear PE's frozen state for PowerNV where
 * we have frozen PE until BAR restore is completed. It's
 * harmless to clear it for pSeries. To be consistent with
 * PE reset (for 3 times), we try to clear the frozen state
 * for 3 times as well.
 *
 * Returns 0 on success, -EIO when any PE stays frozen after
 * three unfreeze attempts.
 */
static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed)
{
	struct eeh_pe *pe;
	int i;

	eeh_for_each_pe(root, pe) {
		if (include_passed || !eeh_pe_passed(pe)) {
			/* Mirror the triple retry used for PE reset */
			for (i = 0; i < 3; i++)
				if (!eeh_unfreeze_pe(pe))
					break;
			if (i >= 3)
				return -EIO;
		}
	}
	eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed);
	return 0;
}
/**
 * eeh_pe_reset_and_recover - Full reset/recover cycle for one PE
 * @pe: EEH PE to reset
 *
 * Saves device state, performs a full PE reset, unfreezes the PE and
 * restores device state.  Returns 0 on success or a negative errno.
 */
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_pe_reset_full(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/*
	 * NOTE(review): the tail below was missing from the scraped chunk
	 * (the function had no closing brace); restored per upstream.
	 */
	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);

	return 0;
}
/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 * @rmv_data: Optional, list to record removed devices
 * @driver_eeh_aware: Does the device's driver provide EEH support?
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data,
			    bool driver_eeh_aware)
{
	time64_t tstamp;
	int cnt, rc;
	struct eeh_dev *edev;
	struct eeh_pe *tmp_pe;
	bool any_passed = false;

	/*
	 * NOTE(review): this accumulation loop was missing from the
	 * scraped chunk — without it any_passed stays false and tmp_pe
	 * is unused; restored per upstream.
	 */
	eeh_for_each_pe(pe, tmp_pe)
		any_passed |= eeh_pe_passed(tmp_pe);

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwords. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pci_hp_add_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	} else {
		pci_hp_remove_devices(bus);
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_pe_reset_full(pe, false);
	if (rc)
		return rc;

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc)
		return rc;

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (!driver_eeh_aware || rmv_data->removed_dev_count) {
		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
			(driver_eeh_aware ? "partial" : "complete"));
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF) {
			eeh_add_virt_device(edev);
		} else {
			if (!driver_eeh_aware)
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			pci_hp_add_devices(bus);
		}
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP, true);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;
	return 0;
}
/* The longest amount of time to wait for a pci device * to come back on line, in seconds.
*/ #define MAX_WAIT_FOR_RECOVERY 300
/* Walks the PE tree after processing an event to remove any stale PEs. * * NB: This needs to be recursive to ensure the leaf PEs get removed * before their parents do. Although this is possible to do recursively * we don't since this is easier to read and we need to garantee * the leaf nodes will be handled first.
*/ staticvoid eeh_pe_cleanup(struct eeh_pe *pe)
{ struct eeh_pe *child_pe, *tmp;
if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
list_del(&pe->child);
kfree(pe);
}
}
/** * eeh_check_slot_presence - Check if a device is still present in a slot * @pdev: pci_dev to check * * This function may return a false positive if we can't determine the slot's * presence state. This might happen for PCIe slots if the PE containing * the upstream bridge is also frozen, or the bridge is part of the same PE * as the device. * * This shouldn't happen often, but you might see it if you hotplug a PCIe * switch.
*/ staticbool eeh_slot_presence_check(struct pci_dev *pdev)
{ conststruct hotplug_slot_ops *ops; struct pci_slot *slot;
u8 state; int rc;
if (!pdev) returnfalse;
if (pdev->error_state == pci_channel_io_perm_failure) returnfalse;
slot = pdev->slot; if (!slot || !slot->hotplug) returntrue;
ops = slot->hotplug->ops; if (!ops || !ops->get_adapter_status) returntrue;
/* set the attention indicator while we've got the slot ops */ if (ops->set_attention_status)
ops->set_attention_status(slot->hotplug, 1);
rc = ops->get_adapter_status(slot->hotplug, &state); if (rc) returntrue;
if (pdev->error_state == pci_channel_io_perm_failure) return;
slot = pdev->slot; if (!slot || !slot->hotplug) return;
ops = slot->hotplug->ops; if (!ops || !ops->set_attention_status) return;
ops->set_attention_status(slot->hotplug, 0);
}
/** * eeh_handle_normal_event - Handle EEH events on a specific PE * @pe: EEH PE - which should not be used after we return, as it may * have been invalidated. * * Attempts to recover the given PE. If recovery fails or the PE has failed * too many times, remove the PE. * * While PHB detects address or data parity errors on particular PCI * slot, the associated PE will be frozen. Besides, DMA's occurring * to wild addresses (which usually happen due to bugs in device * drivers or in PCI adapter firmware) can cause EEH error. #SERR, * #PERR or other misc PCI-related errors also can trigger EEH errors. * * Recovery process consists of unplugging the device driver (which * generated hotplug events to userspace), then issuing a PCI #RST to * the device, then reconfiguring the PCI config space for all bridges * & devices under this slot, and then finally restarting the device * drivers (which cause a second set of hotplug events to go out to * userspace).
*/ void eeh_handle_normal_event(struct eeh_pe *pe)
/*
 * NOTE(review): this function is left byte-identical because the scraped
 * chunk is garbled — fused keywords, statements jammed after comments,
 * and (flagged inline below) missing brace/#endif and a missing `goto out;'.
 * It must be re-synced with the upstream file rather than hand-patched.
 */
{ struct pci_bus *bus; struct eeh_dev *edev, *tmp; struct eeh_pe *tmp_pe; int rc = 0; enum pci_ers_result result = PCI_ERS_RESULT_NONE; struct eeh_rmv_data rmv_data =
{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0}; int devices = 0;
pci_lock_rescan_remove();
bus = eeh_pe_bus_get(pe); if (!bus) {
pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
pci_unlock_rescan_remove(); return;
}
/* * When devices are hot-removed we might get an EEH due to * a driver attempting to touch the MMIO space of a removed * device. In this case we don't have a device to recover * so suppress the event if we can't find any present devices. * * The hotplug driver should take care of tearing down the * device itself.
*/
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp) if (eeh_slot_presence_check(edev->pdev))
devices++;
if (!devices) {
pr_warn("EEH: Frozen PHB#%x-PE#%x is empty!\n",
pe->phb->global_number, pe->addr); /* * The device is removed, tear down its state, on powernv * hotplug driver would take care of it but not on pseries, * permanently disable the card as it is hot removed. * * In the case of powernv, note that the removal of device * is covered by pci rescan lock, so no problem even if hotplug * driver attempts to remove the device.
*/ goto recover_failed;
}
#ifdef CONFIG_STACKTRACE /* * Print the saved stack trace now that we've verified there's * something to recover.
*/ if (pe->trace_entries) { void **ptrs = (void **) pe->stack_trace; int i;
/* FIXME: Use the same format as dump_stack() */
pr_err("EEH: Call Trace:\n"); for (i = 0; i < pe->trace_entries; i++)
pr_err("EEH: [%p] %pS\n", ptrs[i], ptrs[i]);
/*
 * NOTE(review): the `}' closing the if (pe->trace_entries) block and the
 * matching #endif for CONFIG_STACKTRACE are missing from this chunk.
 */
eeh_pe_update_time_stamp(pe);
pe->freeze_count++; if (pe->freeze_count > eeh_max_freezes) {
pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
pe->phb->global_number, pe->addr,
pe->freeze_count);
goto recover_failed;
}
/* Walk the various device drivers attached to this slot through * a reset sequence, giving each an opportunity to do what it needs * to accomplish the reset. Each child gets a report of the * status ... if any child can't handle the reset, then the entire * slot is dlpar removed and added. * * When the PHB is fenced, we have to issue a reset to recover from * the error. Override the result if necessary to have partially * hotplug for this case.
*/
pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
pe->freeze_count, eeh_max_freezes);
pr_info("EEH: Notify device drivers to shutdown\n");
eeh_set_channel_state(pe, pci_channel_io_frozen);
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(IO frozen)", pe,
eeh_report_error, &result); if (result == PCI_ERS_RESULT_DISCONNECT) goto recover_failed;
/* * Error logged on a PHB are always fences which need a full * PHB reset to clear so force that to happen.
*/ if ((pe->type & EEH_PE_PHB) && result != PCI_ERS_RESULT_NONE)
result = PCI_ERS_RESULT_NEED_RESET;
/* Get the current PCI slot state. This can take a long time, * sometimes over 300 seconds for certain systems.
*/
rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY * 1000); if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
pr_warn("EEH: Permanent failure\n"); goto recover_failed;
}
/* Since rtas may enable MMIO when posting the error log, * don't post the error log until after all dev drivers * have been informed.
*/
pr_info("EEH: Collect temporary log\n");
eeh_slot_error_detail(pe, EEH_LOG_TEMP);
/* If all device drivers were EEH-unaware, then shut * down all of the device drivers, and hope they * go down willingly, without panicing the system.
*/ if (result == PCI_ERS_RESULT_NONE) {
pr_info("EEH: Reset with hotplug activity\n");
rc = eeh_reset_device(pe, bus, NULL, false); if (rc) {
pr_warn("%s: Unable to reset, err=%d\n", __func__, rc); goto recover_failed;
}
}
/* If all devices reported they can proceed, then re-enable MMIO */ if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enable I/O for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); if (rc < 0) goto recover_failed;
if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
pr_info("EEH: Notify device drivers to resume I/O\n");
eeh_pe_report("mmio_enabled", pe,
eeh_report_mmio_enabled, &result);
}
} if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enabled DMA for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA); if (rc < 0) goto recover_failed;
if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else { /* * We didn't do PE reset for the case. The PE * is still in frozen state. Clear it before * resuming the PE.
*/
eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
result = PCI_ERS_RESULT_RECOVERED;
}
}
/* If any device called out for a reset, then reset the slot */ if (result == PCI_ERS_RESULT_NEED_RESET) {
pr_info("EEH: Reset without hotplug activity\n");
rc = eeh_reset_device(pe, bus, &rmv_data, true); if (rc) {
pr_warn("%s: Cannot reset, err=%d\n", __func__, rc); goto recover_failed;
}
if ((result == PCI_ERS_RESULT_RECOVERED) ||
(result == PCI_ERS_RESULT_NONE)) { /* * For those hot removed VFs, we should add back them after PF * get recovered properly.
*/
list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
rmv_entry) {
eeh_add_virt_device(edev);
list_del(&edev->rmv_entry);
}
/* Tell all device drivers that they can resume operations */
pr_info("EEH: Notify device driver to resume\n");
eeh_set_channel_state(pe, pci_channel_io_normal);
eeh_set_irq_state(pe, true);
eeh_pe_report("resume", pe, eeh_report_resume, NULL);
eeh_for_each_pe(pe, tmp_pe) {
eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
edev->mode &= ~EEH_DEV_NO_HANDLER;
edev->in_error = false;
}
}
/*
 * NOTE(review): upstream has a `goto out;' here so the success path skips
 * the recover_failed block; it is missing from this chunk, as is the `}'
 * closing the NEED_RESET branch above — re-sync with the original file.
 */
recover_failed: /* * About 90% of all real-life EEH failures in the field * are due to poorly seated PCI cards. Only 10% or so are * due to actual, failed cards.
*/
pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n" "Please try reseating or replacing it\n",
pe->phb->global_number, pe->addr);
eeh_slot_error_detail(pe, EEH_LOG_PERM);
/* Notify all devices that they're about to go down. */
eeh_set_irq_state(pe, false);
eeh_pe_report("error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
/* Mark the PE to be removed permanently */
eeh_pe_state_mark(pe, EEH_PE_REMOVED);
/* * Shut down the device drivers for good. We mark * all removed devices correctly to avoid access * the their PCI config any more.
*/ if (pe->type & EEH_PE_VF) {
eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
} else {
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
bus = eeh_pe_bus_get(pe); if (bus)
pci_hp_remove_devices(bus); else
pr_err("%s: PCI bus for PHB#%x-PE#%x disappeared\n",
__func__, pe->phb->global_number, pe->addr);
/* The passed PE should no longer be used */
pci_unlock_rescan_remove(); return;
}
out: /* * Clean up any PEs without devices. While marked as EEH_PE_RECOVERYING * we don't want to modify the PE tree structure so we do it here.
*/
eeh_pe_cleanup(pe);
/* clear the slot attention LED for all recovered devices */
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp)
eeh_clear_slot_attention(edev->pdev);
eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
pci_unlock_rescan_remove();
}
/** * eeh_handle_special_event - Handle EEH events without a specific failing PE * * Called when an EEH event is detected but can't be narrowed down to a * specific PE. Iterates through possible failures and handles them as * necessary.
*/ void eeh_handle_special_event(void)
/*
 * NOTE(review): left byte-identical — the chunk has fused keywords and
 * statements jammed after comments throughout; re-sync with the upstream
 * file rather than hand-patching.  Note the dead-PHB branch fetches the
 * bus from phb_pe while logging pe->addr; verify against the original.
 */
{ struct eeh_pe *pe, *phb_pe, *tmp_pe; struct eeh_dev *edev, *tmp_edev; struct pci_bus *bus; struct pci_controller *hose; unsignedlong flags; int rc;
pci_lock_rescan_remove();
do {
rc = eeh_ops->next_error(&pe);
switch (rc) { case EEH_NEXT_ERR_DEAD_IOC: /* Mark all PHBs in dead state */
eeh_serialize_lock(&flags);
/* Purge all events */
eeh_remove_event(NULL, true);
list_for_each_entry(hose, &hose_list, list_node) {
phb_pe = eeh_phb_pe_get(hose); if (!phb_pe) continue;
eeh_pe_mark_isolated(phb_pe);
}
eeh_serialize_unlock(flags);
break; case EEH_NEXT_ERR_FROZEN_PE: case EEH_NEXT_ERR_FENCED_PHB: case EEH_NEXT_ERR_DEAD_PHB: /* Mark the PE in fenced state */
eeh_serialize_lock(&flags);
/* Purge all events of the PHB */
eeh_remove_event(pe, true);
if (rc != EEH_NEXT_ERR_DEAD_PHB)
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
eeh_pe_mark_isolated(pe);
eeh_serialize_unlock(flags);
break; case EEH_NEXT_ERR_NONE:
pci_unlock_rescan_remove(); return; default:
pr_warn("%s: Invalid value %d from next_error()\n",
__func__, rc);
pci_unlock_rescan_remove(); return;
}
/* * For fenced PHB and frozen PE, it's handled as normal * event. We have to remove the affected PHBs for dead * PHB and IOC
*/ if (rc == EEH_NEXT_ERR_FROZEN_PE ||
rc == EEH_NEXT_ERR_FENCED_PHB) {
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
pci_unlock_rescan_remove();
eeh_handle_normal_event(pe);
pci_lock_rescan_remove();
} else {
eeh_for_each_pe(pe, tmp_pe)
eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
edev->mode &= ~EEH_DEV_NO_HANDLER;
/* Notify all devices to be down */
eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
eeh_pe_report( "error_detected(permanent failure)", pe,
eeh_report_failure, NULL);
eeh_set_channel_state(pe, pci_channel_io_perm_failure);
bus = eeh_pe_bus_get(phb_pe); if (!bus) {
pr_err("%s: Cannot find PCI bus for " "PHB#%x-PE#%x\n",
__func__,
pe->phb->global_number,
pe->addr); break;
}
pci_hp_remove_devices(bus);
}
}
/* * If we have detected dead IOC, we needn't proceed * any more since all PHBs would have been removed
*/ if (rc == EEH_NEXT_ERR_DEAD_IOC) break;
} while (rc != EEH_NEXT_ERR_NONE);
pci_unlock_rescan_remove();
}
/*
 * NOTE(review): the text below is boilerplate from the web page this file
 * was scraped from, not source code.  Wrapped in a comment so the file can
 * compile; it should be deleted when re-syncing with the upstream file.
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.16 Sekunden (vorverarbeitet)
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */