// SPDX-License-Identifier: GPL-2.0
/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */
/* On several sun4u processors, it is illegal to mix bypass and * non-bypass accesses. Therefore we access all INO buckets * using bypass accesses only.
*/ staticunsignedlong bucket_get_chain_pa(unsignedlong bucket_pa)
{ unsignedlong ret;
/* Major version 2.0 of HV_GRP_INTR added support for the VIRQ cookie * based interfaces, but: * * 1) Several OSs, Solaris and Linux included, use them even when only * negotiating version 1.0 (or failing to negotiate at all). So the * hypervisor has a workaround that provides the VIRQ interfaces even * when only verion 1.0 of the API is in use. * * 2) Second, and more importantly, with major version 2.0 these VIRQ * interfaces only were actually hooked up for LDC interrupts, even * though the Hypervisor specification clearly stated: * * The new interrupt API functions will be available to a guest * when it negotiates version 2.0 in the interrupt API group 0x2. When * a guest negotiates version 2.0, all interrupt sources will only * support using the cookie interface, and any attempt to use the * version 1.0 interrupt APIs numbered 0xa0 to 0xa6 will result in the * ENOTSUPPORTED error being returned. * * with an emphasis on "all interrupt sources". * * To correct this, major version 3.0 was created which does actually * support VIRQs for all interrupt sources (not just LDC devices). So * if we want to move completely over the cookie based VIRQs we must * negotiate major version 3.0 or later of HV_GRP_INTR.
*/ staticbool sun4v_cookie_only_virqs(void)
{ return hv_irq_version >= 3;
}
staticvoid __init irq_init_hv(void)
{ unsignedlong hv_error, major, minor = 0;
if (tlb_type != hypervisor) return;
if (hvirq_major)
major = hvirq_major; else
major = 3;
val = upa_readq(imap);
val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
IMAP_AID_SAFARI | IMAP_NID_SAFARI);
val |= tid | IMAP_VALID;
upa_writeq(val, imap);
upa_writeq(ICLR_IDLE, handler_data->iclr);
}
return 0;
}
/* Don't do anything.  The desc->status check for IRQ_DISABLED in
 * handler_irq() will skip the handler call and that will leave the
 * interrupt in the sent state.  The next ->enable() call will hit the
 * ICLR register to reset the state machine.
 *
 * This scheme is necessary, instead of clearing the Valid bit in the
 * IMAP register, to handle the case of IMAP registers being shared by
 * multiple INOs (and thus ICLR registers).  Since we use a different
 * virtual IRQ for each shared IMAP instance, the generic code thinks
 * there is only one user so it prematurely calls ->disable() on
 * free_irq().
 *
 * We have to provide an explicit ->disable() method instead of using
 * NULL to get the default.  The reason is that if the generic code
 * sees that, it also hooks up a default ->shutdown method which
 * invokes ->mask() which we do not want.  See irq_chip_set_defaults().
 *
 * NOTE: fixed run-together "staticvoid" keyword from the original,
 * which did not compile.
 */
static void sun4u_irq_disable(struct irq_data *data)
{
	/* Intentionally empty; see the rationale above. */
}
/* handler_irq needs to find the irq. cookie is seen signed in * sun4v_dev_mondo and treated as a non ivector_table delivery.
*/
ihd->bucket.__irq = irq;
cookie = ~__pa(&ihd->bucket);
hv_error = sun4v_vintr_set_cookie(devhandle, devino, cookie); if (hv_error)
pr_err("HV vintr set cookie failed = %ld\n", hv_error);
/* PROM timer node hangs out in the top level of device siblings... */
dp = of_find_node_by_path("/");
dp = dp->child; while (dp) { if (of_node_name_eq(dp, "counter-timer")) break;
dp = dp->sibling;
}
/* Assume if node is not present, PROM uses different tick mechanism * which we should not care about.
*/ if (!dp) {
prom_timers = (struct sun5_timer *) 0; return;
}
/* If PROM is really using this, it must be mapped by him. */
addr = of_get_property(dp, "address", NULL); if (!addr) {
prom_printf("PROM does not have timer mapped, trying to continue.\n");
prom_timers = (struct sun5_timer *) 0; return;
}
prom_timers = (struct sun5_timer *) ((unsignedlong)addr[0]);
}
staticvoid kill_prom_timer(void)
{ if (!prom_timers) return;
/* Save them away for later. */
prom_limit0 = prom_timers->limit0;
prom_limit1 = prom_timers->limit1;
/* Just as in sun4c PROM uses timer which ticks at IRQ 14. * We turn both off here just to be paranoid.
*/
prom_timers->limit0 = 0;
prom_timers->limit1 = 0;
/* Reset the calling CPU's IRQ work list in its trap block to empty.
 * Marked notrace: this runs during early per-cpu bring-up, before
 * tracing infrastructure can safely be entered.
 */
void notrace init_irqwork_curcpu(void)
{
	int this_cpu = hard_smp_processor_id();

	/* An empty worklist is represented by a zero physical address. */
	trap_block[this_cpu].irq_worklist_pa = 0UL;
}
/* Please be very careful with register_one_mondo() and * sun4v_register_mondo_queues(). * * On SMP this gets invoked from the CPU trampoline before * the cpu has fully taken over the trap table from OBP, * and its kernel stack + %g6 thread register state is * not fully cooked yet. * * Therefore you cannot make any OBP calls, not even prom_printf, * from these two routines.
*/ staticvoid notrace register_one_mondo(unsignedlong paddr, unsignedlong type, unsignedlong qmask)
{ unsignedlong num_entries = (qmask + 1) / 64; unsignedlong status;
/* Each queue region must be a power of 2 multiple of 64 bytes in * size. The base real address must be aligned to the size of the * region. Thus, an 8KB queue must be 8KB aligned, for example.
*/ staticvoid __init alloc_one_queue(unsignedlong *pa_ptr, unsignedlong qmask)
{ unsignedlong size = PAGE_ALIGN(qmask + 1); unsignedlong order = get_order(size); unsignedlong p;
p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!p) {
prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();
}
/* Make sure mondo block is 64byte aligned */
p = kzalloc(127, GFP_KERNEL); if (!p) {
prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
prom_halt();
}
mondo = (void *)(((unsignedlong)p + 63) & ~0x3f);
tb->cpu_mondo_block_pa = __pa(mondo);
page = get_zeroed_page(GFP_KERNEL); if (!page) {
prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
prom_halt();
}
tb->cpu_list_pa = __pa(page); #endif
}
/* Allocate mondo and error queues for all possible cpus. */ staticvoid __init sun4v_init_mondo_queues(void)
{ int cpu;
/* Only invoked on boot processor.
 *
 * One-time IRQ bring-up: negotiate the hypervisor interrupt API,
 * set up the ivector table, neutralize the PROM timer, allocate and
 * register the sun4v mondo queues (when running under the hypervisor),
 * and finally enable interrupt delivery in %pstate.
 *
 * The ordering here is deliberate: the HV API version must be known
 * before any mondo-queue work, and PSTATE_IE is set only after the
 * ivector table exists and pending soft interrupts are cleared.
 */
void __init init_IRQ(void)
{
	irq_init_hv();
	irq_ivector_init();
	map_prom_timers();
	kill_prom_timer();

	if (tlb_type == hypervisor)
		sun4v_init_mondo_queues();

	init_send_mondo_info();

	if (tlb_type == hypervisor) {
		/* Load up the boot cpu's entries. */
		sun4v_register_mondo_queues(hard_smp_processor_id());
	}

	/* We need to clear any IRQ's pending in the soft interrupt
	 * registers, a spurious one could be left around from the
	 * PROM timer which we just disabled.
	 */
	clear_softint(get_softint());

	/* Now that ivector table is initialized, it is safe
	 * to receive IRQ vector traps.  We will normally take
	 * one or two right now, in case some device PROM used
	 * to boot us wants to speak to us.  We just ignore them.
	 */
	/* Set PSTATE_IE in %pstate to enable interrupts; only %g1
	 * is clobbered as scratch.
	 */
	__asm__ __volatile__("rdpr	%%pstate, %%g1\n\t"
			     "or	%%g1, %0, %%g1\n\t"
			     "wrpr	%%g1, 0x0, %%pstate"
			     : /* No outputs */
			     : "i" (PSTATE_IE)
			     : "g1");

	irq_to_desc(0)->action = &timer_irq_action;
}