// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */
/* Default action handler for chained interrupts: must never be invoked. */
static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	/* Chained irqs are demultiplexed in the flow handler; a real action here is a bug. */
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}
/*
 * Chained handlers should never call action on their IRQ. This default
 * action will emit warning if such thing happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};
/** * irq_set_chip - set the irq chip for an irq * @irq: irq number * @chip: pointer to irq chip description structure
*/ int irq_set_chip(unsignedint irq, conststruct irq_chip *chip)
{ int ret = -EINVAL;
scoped_irqdesc_get_and_lock(irq, 0) {
scoped_irqdesc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
ret = 0;
} /* For !CONFIG_SPARSE_IRQ make the irq show up in allocated_irqs. */ if (!ret)
irq_mark_irq(irq); return ret;
}
EXPORT_SYMBOL(irq_set_chip);
/** * irq_set_irq_type - set the irq trigger type for an irq * @irq: irq number * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
*/ int irq_set_irq_type(unsignedint irq, unsignedint type)
{
scoped_irqdesc_get_and_buslock(irq, IRQ_GET_DESC_CHECK_GLOBAL) return __irq_set_trigger(scoped_irqdesc, type); return -EINVAL;
}
EXPORT_SYMBOL(irq_set_irq_type);
/** * irq_set_handler_data - set irq handler data for an irq * @irq: Interrupt number * @data: Pointer to interrupt specific data * * Set the hardware irq controller data for an irq
*/ int irq_set_handler_data(unsignedint irq, void *data)
{
scoped_irqdesc_get_and_lock(irq, 0) {
scoped_irqdesc->irq_common_data.handler_data = data; return 0;
} return -EINVAL;
}
EXPORT_SYMBOL(irq_set_handler_data);
/** * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset * @irq_base: Interrupt number base * @irq_offset: Interrupt number offset * @entry: Pointer to MSI descriptor data * * Set the MSI descriptor entry for an irq at offset
*/ int irq_set_msi_desc_off(unsignedint irq_base, unsignedint irq_offset, struct msi_desc *entry)
{
scoped_irqdesc_get_and_lock(irq_base + irq_offset, IRQ_GET_DESC_CHECK_GLOBAL) {
scoped_irqdesc->irq_common_data.msi_desc = entry; if (entry && !irq_offset)
entry->irq = irq_base; return 0;
} return -EINVAL;
}
/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Convenience wrapper around irq_set_msi_desc_off() with a zero offset.
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}
/** * irq_set_chip_data - set irq chip data for an irq * @irq: Interrupt number * @data: Pointer to chip specific data * * Set the hardware irq chip data for an irq
*/ int irq_set_chip_data(unsignedint irq, void *data)
{
scoped_irqdesc_get_and_lock(irq, 0) {
scoped_irqdesc->irq_data.chip_data = data; return 0;
} return -EINVAL;
}
EXPORT_SYMBOL(irq_set_chip_data);
if (!irqd_affinity_is_managed(d)) return IRQ_STARTUP_NORMAL;
irqd_clr_managed_shutdown(d);
if (!cpumask_intersects(aff, cpu_online_mask)) { /* * Catch code which fiddles with enable_irq() on a managed * and potentially shutdown IRQ. Chained interrupt * installment or irq auto probing should not happen on * managed irqs either.
*/ if (WARN_ON_ONCE(force)) return IRQ_STARTUP_ABORT; /* * The interrupt was requested, but there is no online CPU * in it's affinity mask. Put it into managed shutdown * state and let the cpu hotplug mechanism start it up once * a CPU in the mask becomes available.
*/ return IRQ_STARTUP_ABORT;
} /* * Managed interrupts have reserved resources, so this should not * happen.
*/ if (WARN_ON(irq_domain_activate_irq(d, false))) return IRQ_STARTUP_ABORT; return IRQ_STARTUP_MANAGED;
}
/* * Clear managed-shutdown flag, so we don't repeat managed-startup for * multiple hotplugs, and cause imbalanced disable depth.
*/
irqd_clr_managed_shutdown(d);
/* * Only start it up when the disable depth is 1, so that a disable, * hotunplug, hotplug sequence does not end up enabling it during * hotplug unconditionally.
*/
desc->depth--; if (!desc->depth)
irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
}
void irq_shutdown(struct irq_desc *desc)
{ if (irqd_is_started(&desc->irq_data)) {
clear_irq_resend(desc); /* * Increment disable depth, so that a managed shutdown on * CPU hotunplug preserves the actual disabled state when the * CPU comes back online. See irq_startup_managed().
*/
desc->depth++;
/* Shut the interrupt down and deactivate it in the irq domain hierarchy. */
void irq_shutdown_and_deactivate(struct irq_desc *desc)
{
	irq_shutdown(desc);
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}
/*
 * Mark the interrupt disabled in the descriptor state and optionally mask
 * it at the hardware level. Helper shared by the disable paths.
 */
static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		/* Already disabled: at most enforce the hardware mask */
		if (mask)
			mask_irq(desc);
		return;
	}

	irq_state_set_disabled(desc);
	if (desc->irq_data.chip->irq_disable) {
		/* Chip handles disable itself, which implies masked */
		desc->irq_data.chip->irq_disable(&desc->irq_data);
		irq_state_set_masked(desc);
	} else if (mask) {
		mask_irq(desc);
	}
}
/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we use a lazy
 * disable approach. That means we mark the interrupt disabled, but leave
 * the hardware unmasked. That's an optimization because we avoid the
 * hardware access for the common case where no interrupt happens after we
 * marked it disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}
if (chip->flags & IRQCHIP_EOI_THREADED)
chip->irq_eoi(&desc->irq_data);
unmask_irq(desc);
}
/* Busy wait until INPROGRESS is cleared */ staticbool irq_wait_on_inprogress(struct irq_desc *desc)
{ if (IS_ENABLED(CONFIG_SMP)) { do {
raw_spin_unlock(&desc->lock); while (irqd_irq_inprogress(&desc->irq_data))
cpu_relax();
raw_spin_lock(&desc->lock);
} while (irqd_irq_inprogress(&desc->irq_data));
/* Might have been disabled in meantime */ return !irqd_irq_disabled(&desc->irq_data) && desc->action;
} returnfalse;
}
/* * If the interrupt is not in progress and is not an armed * wakeup interrupt, proceed.
*/ if (!irqd_has_set(irqd, IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED)) returntrue;
/* * If the interrupt is an armed wakeup source, mark it pending * and suspended, disable it and notify the pm core about the * event.
*/ if (unlikely(irqd_has_set(irqd, IRQD_WAKEUP_ARMED))) {
irq_pm_handle_wakeup(desc); returnfalse;
}
/* Check whether the interrupt is polled on another CPU */ if (unlikely(desc->istate & IRQS_POLL_INPROGRESS)) { if (WARN_ONCE(irq_poll_cpu == smp_processor_id(), "irq poll in progress on cpu %d for irq %d\n",
smp_processor_id(), desc->irq_data.irq)) returnfalse; return irq_wait_on_inprogress(desc);
}
/* The below works only for single target interrupts */ if (!IS_ENABLED(CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK) ||
!irqd_is_single_target(irqd) || desc->handle_irq != handle_edge_irq) returnfalse;
/* * If the interrupt affinity was moved to this CPU and the * interrupt is currently handled on the previous target CPU, then * busy wait for INPROGRESS to be cleared. Otherwise for edge type * interrupts the handler might get stuck on the previous target: * * CPU 0 CPU 1 (new target) * handle_edge_irq() * repeat: * handle_event() handle_edge_irq() * if (INPROGESS) { * set(PENDING); * mask(); * return; * } * if (PENDING) { * clear(PENDING); * unmask(); * goto repeat; * } * * This happens when the device raises interrupts with a high rate * and always before handle_event() completes and the CPU0 handler * can clear INPROGRESS. This has been observed in virtual machines.
*/
aff = irq_data_get_effective_affinity_mask(irqd); if (cpumask_first(aff) != smp_processor_id()) returnfalse; return irq_wait_on_inprogress(desc);
}
staticinlinebool irq_can_handle(struct irq_desc *desc)
{ if (!irq_can_handle_pm(desc)) returnfalse;
return irq_can_handle_actions(desc);
}
/** * handle_nested_irq - Handle a nested irq from a irq thread * @irq: the interrupt number * * Handle interrupts which are nested into a threaded interrupt * handler. The handler function is called inside the calling threads * context.
*/ void handle_nested_irq(unsignedint irq)
{ struct irq_desc *desc = irq_to_desc(irq); struct irqaction *action;
irqreturn_t action_ret;
might_sleep();
scoped_guard(raw_spinlock_irq, &desc->lock) { if (!irq_can_handle_actions(desc)) return;
/** * handle_simple_irq - Simple and software-decoded IRQs. * @desc: the interrupt description structure for this irq * * Simple interrupts are either sent from a demultiplexing interrupt * handler or come from hardware, where no interrupt hardware control is * necessary. * * Note: The caller is expected to handle the ack, clear, mask and unmask * issues if necessary.
*/ void handle_simple_irq(struct irq_desc *desc)
{
guard(raw_spinlock)(&desc->lock);
if (!irq_can_handle_pm(desc)) { if (irqd_needs_resend_when_in_progress(&desc->irq_data))
desc->istate |= IRQS_PENDING; return;
}
/** * handle_untracked_irq - Simple and software-decoded IRQs. * @desc: the interrupt description structure for this irq * * Untracked interrupts are sent from a demultiplexing interrupt handler * when the demultiplexer does not know which device it its multiplexed irq * domain generated the interrupt. IRQ's handled through here are not * subjected to stats tracking, randomness, or spurious interrupt * detection. * * Note: Like handle_simple_irq, the caller is expected to handle the ack, * clear, mask and unmask issues if necessary.
*/ void handle_untracked_irq(struct irq_desc *desc)
{
scoped_guard(raw_spinlock, &desc->lock) { if (!irq_can_handle(desc)) return;
/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !irqd_irq_masked(&desc->irq_data))
		return;

	if (!desc->threads_oneshot)
		unmask_irq(desc);
}
/** * handle_level_irq - Level type irq handler * @desc: the interrupt description structure for this irq * * Level type interrupts are active as long as the hardware line has the * active level. This may require to mask the interrupt and unmask it after * the associated handler has acknowledged the device, so the interrupt * line is back to inactive.
*/ void handle_level_irq(struct irq_desc *desc)
{
guard(raw_spinlock)(&desc->lock);
mask_ack_irq(desc);
/* Send EOI and conditionally unmask, honouring oneshot/threaded state. */
static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		/* Non-oneshot: plain EOI, nothing to unmask */
		chip->irq_eoi(&desc->irq_data);
		return;
	}

	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		/* Threaded EOI is deferred to the irq thread */
		chip->irq_eoi(&desc->irq_data);
	}
}
/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi() call when
 * the interrupt has been serviced. This enables support for modern forms
 * of interrupt handlers, which handle the flow details in hardware,
 * transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	guard(raw_spinlock)(&desc->lock);

	/*
	 * When an affinity change races with IRQ handling, the next interrupt
	 * can arrive on the new CPU before the original CPU has completed
	 * handling the previous one - it may need to be resent.
	 */
	if (!irq_can_handle_pm(desc)) {
		if (irqd_needs_resend_when_in_progress(&desc->irq_data))
			desc->istate |= IRQS_PENDING;
		cond_eoi_irq(chip, &desc->irq_data);
		return;
	}

	/* No action installed: mask and EOI, do not handle */
	if (!irq_can_handle_actions(desc)) {
		mask_irq(desc);
		cond_eoi_irq(chip, &desc->irq_data);
		return;
	}

	kstat_incr_irqs_this_cpu(desc);
	/* Oneshot interrupts stay masked until the thread has run */
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	/*
	 * When the race described above happens this will resend the interrupt.
	 */
	if (unlikely(desc->istate & IRQS_PENDING))
		check_irq_resend(desc, false);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
/**
 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
 * @desc:	the interrupt description structure for this irq
 *
 * A simple NMI-safe handler, considering the restrictions from request_nmi.
 *
 * Only a single callback will be issued to the chip: an ->eoi() call when
 * the interrupt has been serviced. This enables support for modern forms
 * of interrupt handlers, which handle the flow details in hardware,
 * transparently.
 */
void handle_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t ret;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	/*
	 * NMIs cannot be shared, there is only one action.
	 */
	ret = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, ret);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupt occurs on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware and
 * must be acked in order to be reenabled. After the ack another interrupt
 * can happen on the same source even before the first one is handled by
 * the associated event handler. If this happens it might be necessary to
 * disable (mask) the interrupt depending on the controller hardware. This
 * requires to reenable the interrupt inside of the loop which handles the
 * interrupts which have arrived while the handler was running. If all
 * pending interrupts are handled, the loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	guard(raw_spinlock)(&desc->lock);

	if (!irq_can_handle(desc)) {
		/* Latch the event so it can be resent when handling resumes */
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		return;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		/* Action may vanish while the lock was dropped in handle_irq_event() */
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			return;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Reenable it, if it was not disabled in meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));
}
EXPORT_SYMBOL(handle_edge_irq);
/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	/* Ack and EOI are optional for percpu chips */
	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t ret;

	/*
	 * PER CPU interrupts are not serialized. Do not touch
	 * desc->tot_count.
	 */
	__kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		ret = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, ret);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		/* Keep a spurious interrupt from firing again on this CPU */
		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
/**
 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 *				     dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 * as a percpu pointer.
 */
void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t ret;

	__kstat_incr_irqs_this_cpu(desc);

	trace_irq_handler_entry(irq, action);
	ret = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
	trace_irq_handler_exit(irq, action, ret);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}
staticvoid
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained, constchar *name)
{ if (!handle) {
handle = handle_bad_irq;
} else { struct irq_data *irq_data = &desc->irq_data; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY /* * With hierarchical domains we might run into a * situation where the outermost chip is not yet set * up, but the inner chips are there. Instead of * bailing we install the handler, but obviously we * cannot enable/startup the interrupt at this point.
*/ while (irq_data) { if (irq_data->chip != &no_irq_chip) break; /* * Bail out if the outer chip is not set up * and the interrupt supposed to be started * right away.
*/ if (WARN_ON(is_chained)) return; /* Try the parent */
irq_data = irq_data->parent_data;
} #endif if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) return;
}
if (handle != handle_bad_irq && is_chained) { unsignedint type = irqd_get_trigger_type(&desc->irq_data);
/* * We're about to start this interrupt immediately, * hence the need to set the trigger configuration. * But the .set_type callback may have overridden the * flow handler, ignoring that we're dealing with a * chained interrupt. Reset it immediately because we * do know better.
*/ if (type != IRQ_TYPE_NONE) {
__irq_set_trigger(desc, type);
desc->handle_irq = handle;
}
#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE /** * irq_cpu_online - Invoke all irq_cpu_online functions. * * Iterate through all irqs and invoke the chip.irq_cpu_online() * for each.
*/ void irq_cpu_online(void)
{ unsignedint irq;
/** * irq_cpu_offline - Invoke all irq_cpu_offline functions. * * Iterate through all irqs and invoke the chip.irq_cpu_offline() * for each.
*/ void irq_cpu_offline(void)
{ unsignedint irq;
#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS /** * handle_fasteoi_ack_irq - irq handler for edge hierarchy stacked on * transparent controllers * * @desc: the interrupt description structure for this irq * * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip * also needs to have its ->irq_ack() function called.
*/ void handle_fasteoi_ack_irq(struct irq_desc *desc)
{ struct irq_chip *chip = desc->irq_data.chip;
guard(raw_spinlock)(&desc->lock);
if (!irq_can_handle_pm(desc)) {
cond_eoi_irq(chip, &desc->irq_data); return;
}
if (unlikely(!irq_can_handle_actions(desc))) {
mask_irq(desc);
cond_eoi_irq(chip, &desc->irq_data); return;
}
kstat_incr_irqs_this_cpu(desc); if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
/** * handle_fasteoi_mask_irq - irq handler for level hierarchy stacked on * transparent controllers * * @desc: the interrupt description structure for this irq * * Like handle_fasteoi_irq(), but for use with hierarchy where the irq_chip * also needs to have its ->irq_mask_ack() function called.
*/ void handle_fasteoi_mask_irq(struct irq_desc *desc)
{ struct irq_chip *chip = desc->irq_data.chip;
/** * irq_chip_set_parent_state - set the state of a parent interrupt. * * @data: Pointer to interrupt specific data * @which: State to be restored (one of IRQCHIP_STATE_*) * @val: Value corresponding to @which * * Conditional success, if the underlying irqchip does not implement it.
*/ int irq_chip_set_parent_state(struct irq_data *data, enum irqchip_irq_state which, bool val)
{
data = data->parent_data;
if (!data || !data->chip->irq_set_irqchip_state) return 0;
/** * irq_chip_get_parent_state - get the state of a parent interrupt. * * @data: Pointer to interrupt specific data * @which: one of IRQCHIP_STATE_* the caller wants to know * @state: a pointer to a boolean where the state is to be stored * * Conditional success, if the underlying irqchip does not implement it.
*/ int irq_chip_get_parent_state(struct irq_data *data, enum irqchip_irq_state which, bool *state)
{
data = data->parent_data;
if (!data || !data->chip->irq_get_irqchip_state) return 0;
/**
 * irq_chip_shutdown_parent - Shutdown the parent interrupt
 * @data:	Pointer to interrupt specific data
 *
 * Invokes the irq_shutdown() callback of the parent if available or falls
 * back to irq_chip_disable_parent().
 */
void irq_chip_shutdown_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	if (!parent->chip->irq_shutdown) {
		irq_chip_disable_parent(data);
		return;
	}
	parent->chip->irq_shutdown(parent);
}
EXPORT_SYMBOL_GPL(irq_chip_shutdown_parent);
/** * irq_chip_startup_parent - Startup the parent interrupt * @data: Pointer to interrupt specific data * * Invokes the irq_startup() callback of the parent if available or falls * back to irq_chip_enable_parent().
*/ unsignedint irq_chip_startup_parent(struct irq_data *data)
{ struct irq_data *parent = data->parent_data;
if (parent->chip->irq_startup) return parent->chip->irq_startup(parent);
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	if (parent->chip->irq_enable)
		parent->chip->irq_enable(parent);
	else
		parent->chip->irq_unmask(parent);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	if (parent->chip->irq_disable)
		parent->chip->irq_disable(parent);
	else
		parent->chip->irq_mask(parent);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	parent->chip->irq_ack(parent);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	parent->chip->irq_mask(parent);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
/**
 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_ack_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	parent->chip->irq_mask_ack(parent);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	parent->chip->irq_unmask(parent);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	parent->chip->irq_eoi(parent);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
/** * irq_chip_set_affinity_parent - Set affinity on the parent interrupt * @data: Pointer to interrupt specific data * @dest: The affinity mask to set * @force: Flag to enforce setting (disable online checks) * * Conditional, as the underlying parent chip might not implement it.
*/ int irq_chip_set_affinity_parent(struct irq_data *data, conststruct cpumask *dest, bool force)
{
data = data->parent_data; if (data->chip->irq_set_affinity) return data->chip->irq_set_affinity(data, dest, force);
/** * irq_chip_set_type_parent - Set IRQ type on the parent interrupt * @data: Pointer to interrupt specific data * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h * * Conditional, as the underlying parent chip might not implement it.
*/ int irq_chip_set_type_parent(struct irq_data *data, unsignedint type)
{
data = data->parent_data;
if (data->chip->irq_set_type) return data->chip->irq_set_type(data, type);
/** * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware * @data: Pointer to interrupt specific data * * Iterate through the domain hierarchy of the interrupt and check * whether a hw retrigger function exists. If yes, invoke it.
*/ int irq_chip_retrigger_hierarchy(struct irq_data *data)
{ for (data = data->parent_data; data; data = data->parent_data) if (data->chip && data->chip->irq_retrigger) return data->chip->irq_retrigger(data);
/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 *
 * Returns -ENOSYS when the parent chip does not implement the callback.
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	struct irq_data *parent = data->parent_data;

	if (parent->chip->irq_set_vcpu_affinity)
		return parent->chip->irq_set_vcpu_affinity(parent, vcpu_info);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent); /** * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt * @data: Pointer to interrupt specific data * @on: Whether to set or reset the wake-up capability of this irq * * Conditional, as the underlying parent chip might not implement it.
*/ int irq_chip_set_wake_parent(struct irq_data *data, unsignedint on)
{
data = data->parent_data;
if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE) return 0;
if (data->chip->irq_set_wake) return data->chip->irq_set_wake(data, on);
/**
 * irq_chip_request_resources_parent - Request resources on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
int irq_chip_request_resources_parent(struct irq_data *data)
{
	struct irq_data *parent = data->parent_data;

	if (parent->chip->irq_request_resources)
		return parent->chip->irq_request_resources(parent);

	/* no error on missing optional irq_chip::irq_request_resources */
	return 0;
}
EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
/** * irq_chip_release_resources_parent - Release resources on the parent interrupt * @data: Pointer to interrupt specific data
*/ void irq_chip_release_resources_parent(struct irq_data *data)
{
data = data->parent_data; if (data->chip->irq_release_resources)
data->chip->irq_release_resources(data);
}
EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent); #endif
/** * irq_chip_compose_msi_msg - Compose msi message for a irq chip * @data: Pointer to interrupt specific data * @msg: Pointer to the MSI message * * For hierarchical domains we find the first chip in the hierarchy * which implements the irq_compose_msi_msg callback. For non * hierarchical we use the top level chip.
*/ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{ struct irq_data *pos;
for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) { if (data->chip && data->chip->irq_compose_msi_msg)
pos = data;
}
staticstruct device *irq_get_pm_device(struct irq_data *data)
{ if (data->domain) return data->domain->pm_dev;
return NULL;
}
/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);

	/* Nothing to do without runtime PM or without a PM device */
	if (!IS_ENABLED(CONFIG_PM) || !dev)
		return 0;

	return pm_runtime_resume_and_get(dev);
}
/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure, belongs. Note that power will only be disabled, once this
 * function has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	struct device *dev = irq_get_pm_device(data);
	int ret = 0;

	if (IS_ENABLED(CONFIG_PM) && dev)
		ret = pm_runtime_put(dev);

	/* Positive pm_runtime_put() returns are not errors */
	return ret < 0 ? ret : 0;
}
/*
 * NOTE(review): the following text is unrelated German web-page boilerplate
 * that was appended to this file by an extraction/scraping step. It is not
 * part of the kernel source and is fenced off in a comment so the file
 * remains compilable. Preserved verbatim:
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.18 Sekunden
 * (vorverarbeitet)
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfaeltig zusammengestellt. Es wird jedoch weder
 * Vollstaendigkeit, noch Richtigkeit, noch Qualitaet der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */