/*
 * The XIVE Interrupt source numbers are within the range 0 to
 * KVMPPC_XICS_NR_IRQS.
 */
#define KVMPPC_XIVE_FIRST_IRQ	0
#define KVMPPC_XIVE_NR_IRQS	KVMPPC_XICS_NR_IRQS
/*
 * State for one guest irq source.
 *
 * For each guest source we allocate a HW interrupt in the XIVE
 * which we use for all SW triggers. It will be unused for
 * pass-through but it's easier to keep around as the same
 * guest interrupt can alternatively be emulated or pass-through
 * if a physical device is hot unplugged and replaced with an
 * emulated one.
 *
 * This state structure is very similar to the XICS one with
 * additional XIVE specific tracking.
 */
struct kvmppc_xive_irq_state {
	bool valid;			/* Interrupt entry is valid */

	u32 number;			/* Guest IRQ number */
	u32 ipi_number;			/* XIVE IPI HW number */
	struct xive_irq_data ipi_data;	/* XIVE IPI associated data */
	u32 pt_number;			/* XIVE Pass-through number if any */
	struct xive_irq_data *pt_data;	/* XIVE Pass-through associated data */

	/* Targetting as set by guest */
	u8 guest_priority;		/* Guest set priority */
	u8 saved_priority;		/* Saved priority when masking */

	/* Actual targetting */
	u32 act_server;			/* Actual server */
	u8 act_priority;		/* Actual priority */

	/* Various state bits */
	bool in_eoi;			/* Synchronize with H_EOI */
	bool old_p;			/* P bit state when masking */
	bool old_q;			/* Q bit state when masking */
	bool lsi;			/* level-sensitive interrupt */
	bool asserted;			/* Only for emulated LSI: current state */

	/* Saved for migration state */
	bool in_queue;
	bool saved_p;
	bool saved_q;
	u8 saved_scan_prio;
};
/*
 * Select the "right" interrupt (IPI vs. passthrough).
 *
 * Returns, through the optional out parameters, the HW irq number and
 * the associated xive_irq_data for the given source: the pass-through
 * interrupt when one is assigned (non-zero pt_number), otherwise the
 * internal IPI. Either out pointer may be NULL if the caller does not
 * need that piece of information.
 */
static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
					  u32 *out_hw_irq,
					  struct xive_irq_data **out_xd)
{
	if (state->pt_number) {
		/* Pass-through takes precedence over the emulation IPI */
		if (out_hw_irq)
			*out_hw_irq = state->pt_number;
		if (out_xd)
			*out_xd = state->pt_data;
	} else {
		if (out_hw_irq)
			*out_hw_irq = state->ipi_number;
		if (out_xd)
			*out_xd = &state->ipi_data;
	}
}
/*
 * This corresponds to an "ICS" in XICS terminology, we use it
 * as a mean to break up source information into multiple structures.
 */
struct kvmppc_xive_src_block {
	arch_spinlock_t lock;
	u16 id;
	struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
};
struct kvmppc_xive;

struct kvmppc_xive_ops {
	/* Invoked for a guest irq; exact contract is defined by the
	 * implementation registered elsewhere — NOTE(review): confirm
	 * against the implementing .c file. */
	int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq);
};
/*
 * NOTE(review): the struct header(s) that should enclose the fields
 * below were lost when this file was extracted. These look like
 * members of two separate structures (a per-VM XIVE state and a
 * per-vCPU state, split around the queue/CPPR fields) — restore the
 * missing declarations from the original source before building.
 */
/* * For state save, we lazily scan the queues on the first interrupt * being migrated. We don't have a clean way to reset that flags * so we keep track of the number of valid sources and how many of * them were migrated so we can reset when all of them have been * processed.
 */
u32 src_count;
u32 saved_src_count;
/* * Some irqs are delayed on restore until the source is created, * keep track here of how many of them
 */
u32 delayed_irqs;
/* Which queues (priorities) are in use by the guest */
u8 qmap;
/* Queue orders */
u32 q_order;
u32 q_page_order;
/* Flags */
u8 flags;
/* Number of entries in the VP block */
u32 nr_servers;
/* Server number. This is the HW CPU ID from a guest perspective */
u32 server_num;
/* * HW VP corresponding to this VCPU. This is the base of the VP * block plus the server number.
 */
u32 vp_id;
/* presumably the chip the VP was allocated on — TODO confirm */
u32 vp_chip_id;
/* presumably the CAM line value for this VP — TODO confirm */
u32 vp_cam;
/* IPI used for sending ... IPIs */
u32 vp_ipi; struct xive_irq_data vp_ipi_data;
/* Local emulation state */
uint8_t cppr; /* guest CPPR */
uint8_t hw_cppr;/* Hardware CPPR */
uint8_t mfrr;
uint8_t pending;
/* Each VP has 8 queues though we only provision some */ struct xive_q queues[KVMPPC_XIVE_Q_COUNT];
u32 esc_virq[KVMPPC_XIVE_Q_COUNT]; char *esc_virq_names[KVMPPC_XIVE_Q_COUNT];
/* Stash a delayed irq on restore from migration (see set_icp) */
u32 delayed_irq;
/*
 * NOTE(review): the beginning of this function was lost during
 * extraction — only its tail remains. It reads like the end of a
 * source-block lookup helper: it optionally reports the per-block
 * source index through 'source', rejects out-of-range block ids, and
 * returns the matching src_blocks entry. 'src', 'bid', 'source' and
 * 'xive' are all defined in the missing part; restore the original
 * function header before relying on this.
 */
if (source)
*source = src; if (bid > KVMPPC_XICS_MAX_ICS_ID) return NULL; return xive->src_blocks[bid];
}
/* * When the XIVE resources are allocated at the HW level, the VP * structures describing the vCPUs of a guest are distributed among * the chips to optimize the PowerBUS usage. For best performance, the * guest vCPUs can be pinned to match the VP structure distribution. * * Currently, the VP identifiers are deduced from the vCPU id using * the kvmppc_pack_vcpu_id() routine which is not incorrect but not * optimal either. It VSMT is used, the result is not continuous and * the constraints on HW resources described above can not be met.
*/ staticinline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
{ return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
}
/* * Mapping between guest priorities and host priorities * is as follow. * * Guest request for 0...6 are honored. Guest request for anything * higher results in a priority of 6 being applied. * * Similar mapping is done for CPPR values
*/ staticinline u8 xive_prio_from_guest(u8 prio)
{ if (prio == 0xff || prio < 6) return prio; return 6;
}
/*
 * NOTE(review): the following is German website boilerplate that was
 * accidentally captured along with this source; it is not part of the
 * code. English translation: "The information on this web page has
 * been compiled carefully and to the best of our knowledge. However,
 * neither completeness, nor correctness, nor quality of the provided
 * information is guaranteed. Note: the colored syntax display and the
 * measurement are still experimental."
 */