// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar * Copyright (C) 2005-2006, Thomas Gleixner, Russell King * * This file contains the interrupt descriptor management code. Detailed * information is available in Documentation/core-api/genericirq.rst *
*/ #include <linux/irq.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/maple_tree.h> #include <linux/irqdomain.h> #include <linux/sysfs.h> #include <linux/string_choices.h>
#include"internals.h"
/* * lockdep: we want to handle all irq_desc locks as a single lock-class:
*/ staticstruct lock_class_key irq_desc_lock_class;
#ifdefined(CONFIG_SMP) staticint __init irq_affinity_setup(char *str)
{
alloc_bootmem_cpumask_var(&irq_default_affinity);
cpulist_parse(str, irq_default_affinity); /* * Set at least the boot cpu. We don't want to end up with * bugreports caused by random commandline masks
*/
cpumask_set_cpu(smp_processor_id(), irq_default_affinity); return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
staticvoid __init init_irq_default_affinity(void)
{ if (!cpumask_available(irq_default_affinity))
zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); if (cpumask_empty(irq_default_affinity))
cpumask_setall(irq_default_affinity);
} #else staticvoid __init init_irq_default_affinity(void)
{
} #endif
#ifdef CONFIG_SMP staticint alloc_masks(struct irq_desc *desc, int node)
{ if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
GFP_KERNEL, node)) return -ENOMEM;
/**
 * irq_get_nr_irqs() - Number of interrupts supported by the system.
 *
 * Return: the current value of the global interrupt counter (nr_irqs).
 */
unsigned int irq_get_nr_irqs(void)
{
	return nr_irqs;
}
EXPORT_SYMBOL_GPL(irq_get_nr_irqs);
/** * irq_set_nr_irqs() - Set the number of interrupts supported by the system. * @nr: New number of interrupts. * * Return: @nr.
*/ unsignedint irq_set_nr_irqs(unsignedint nr)
{
nr_irqs = nr;
/* Expose @desc under the irq sysfs hierarchy once irq_kobj_base exists. */
static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial and failures in the late irq_sysfs_init()
		 * cannot be rolled back.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
		else
			desc->istate |= IRQS_SYSFS;
	}
}
/* Remove @desc from sysfs if, and only if, irq_sysfs_add() succeeded. */
static void irq_sysfs_del(struct irq_desc *desc)
{
	/*
	 * Only invoke kobject_del() when kobject_add() was successfully
	 * invoked for the descriptor. This covers both early boot, where
	 * sysfs is not initialized yet, and the case of a failed
	 * kobject_add() invocation.
	 */
	if (desc->istate & IRQS_SYSFS)
		kobject_del(&desc->kobj);
}
staticint __init irq_sysfs_init(void)
{ struct irq_desc *desc; int irq;
/* * sparse_irq_lock protects also show_interrupts() and * kstat_irq_usr(). Once we deleted the descriptor from the * sparse tree we can free it. Access in proc will fail to * lookup the descriptor. * * The sysfs entry must be serialized against a concurrent * irq_sysfs_init() as well.
*/
irq_sysfs_del(desc);
delete_irq_desc(irq);
/* * We free the descriptor, masks and stat fields via RCU. That * allows demultiplex interrupts to do rcu based management of * the child interrupts. * This also allows us to use rcu in kstat_irqs_usr().
*/
call_rcu(&desc->rcu, delayed_free_desc);
}
staticint alloc_descs(unsignedint start, unsignedint cnt, int node, conststruct irq_affinity_desc *affinity, struct module *owner)
{ struct irq_desc *desc; int i;
/* Validate affinity mask(s) */ if (affinity) { for (i = 0; i < cnt; i++) { if (cpumask_empty(&affinity[i].mask)) return -EINVAL;
}
}
for (i = 0; i < cnt; i++) { conststruct cpumask *mask = NULL; unsignedint flags = 0;
/*
 * Validate @desc and invoke its flow handler.
 *
 * Returns: 0 on success, -EINVAL when @desc is NULL, -EPERM when the
 * interrupt enforces hard interrupt context and the caller is not in one.
 */
int handle_irq_desc(struct irq_desc *desc)
{
	struct irq_data *data;

	if (!desc)
		return -EINVAL;

	data = irq_desc_get_irq_data(desc);
	if (WARN_ON_ONCE(!in_hardirq() && irqd_is_handle_enforce_irqctx(data)))
		return -EPERM;

	generic_handle_irq_desc(desc);
	return 0;
}
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an IRQ context with irq regs
 * initialized.
 */
int generic_handle_irq(unsigned int irq)
{
	return handle_irq_desc(irq_to_desc(irq));
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
/**
 * generic_handle_irq_safe - Invoke the handler for a particular irq from any
 *			     context.
 * @irq:	The irq number to handle
 *
 * Returns:	0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process context). It
 * will report an error if not invoked from IRQ context and the irq has been
 * marked to enforce IRQ-context only.
 */
int generic_handle_irq_safe(unsigned int irq)
{
	unsigned long flags;
	int ret;

	/* Disable hard interrupts so the handler runs with irqs off. */
	local_irq_save(flags);
	ret = handle_irq_desc(irq_to_desc(irq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
#ifdef CONFIG_IRQ_DOMAIN
/**
 * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
 *			       to a domain.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 *
 * This function must be called from an IRQ context with irq regs
 * initialized.
 */
int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq)
{
	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
/**
 * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
 *				    to a domain from any context.
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 *
 * Returns:	0 on success, a negative value on error.
 *
 * This function can be called from any context (IRQ or process
 * context). If the interrupt is marked as 'enforce IRQ-context only' then
 * the function must be invoked from hard interrupt context.
 */
int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
{
	unsigned long flags;
	int ret;

	/* Disable hard interrupts so the handler runs with irqs off. */
	local_irq_save(flags);
	ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
	local_irq_restore(flags);
	return ret;
}
EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);
/** * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging * to a domain. * @domain: The domain where to perform the lookup * @hwirq: The HW irq number to convert to a logical one * * Returns: 0 on success, or -EINVAL if conversion has failed * * This function must be called from an NMI context with irq regs * initialized.
**/ int generic_handle_domain_nmi(struct irq_domain *domain, unsignedint hwirq)
{
WARN_ON_ONCE(!in_nmi()); return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
} #endif
/* Dynamic interrupt handling */
/** * irq_free_descs - free irq descriptors * @from: Start of descriptor range * @cnt: Number of consecutive irqs to free
*/ void irq_free_descs(unsignedint from, unsignedint cnt)
{ int i;
guard(mutex)(&sparse_irq_lock); for (i = 0; i < cnt; i++)
free_desc(from + i);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
/** * __irq_alloc_descs - allocate and initialize a range of irq descriptors * @irq: Allocate for specific irq number if irq >= 0 * @from: Start the search from this irq number * @cnt: Number of consecutive irqs to allocate. * @node: Preferred node on which the irq descriptor should be allocated * @owner: Owning module (can be NULL) * @affinity: Optional pointer to an affinity mask array of size @cnt which * hints where the irq descriptors should be allocated and which * default affinities to use * * Returns the first irq number or error code
*/ int __ref __irq_alloc_descs(int irq, unsignedint from, unsignedint cnt, int node, struct module *owner, conststruct irq_affinity_desc *affinity)
{ int start;
if (!cnt) return -EINVAL;
if (irq >= 0) { if (from > irq) return -EINVAL;
from = irq;
} else { /* * For interrupts which are freely allocated the * architecture can force a lower bound to the @from * argument. x86 uses this to exclude the GSI space.
*/
from = arch_dynirq_lower_bound(from);
}
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return irq_find_at_or_after(offset);
}
/** * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu * @irq: The interrupt number * @cpu: The cpu number * * Returns the sum of interrupt counts on @cpu since boot for * @irq. The caller must ensure that the interrupt is not removed * concurrently.
*/ unsignedint kstat_irqs_cpu(unsignedint irq, int cpu)
{ struct irq_desc *desc = irq_to_desc(irq);
/**
 * kstat_irqs_usr - Get the statistics for an interrupt from thread context
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for @irq.
 *
 * It uses rcu to protect the access since a concurrent removal of an
 * interrupt descriptor is observing an rcu grace period before
 * delayed_free_desc()/irq_kobj_release().
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	rcu_read_lock();
	sum = kstat_irqs(irq);
	rcu_read_unlock();
	return sum;
}
/*
 * NOTE(review): the following is non-code residue from the source this
 * file was extracted from (a German website disclaimer), translated:
 * "The information on this website has been compiled carefully and to
 * the best of our knowledge. However, neither completeness, correctness,
 * nor quality of the provided information is guaranteed. Note: the
 * colored syntax rendering and the measurement are still experimental."
 * This text is not part of the kernel source and should be removed.
 */