/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * This control block defines the PACA which defines the processor * specific data for each logical processor on the system. * There are some pointers defined that are utilized by PLIC. * * C 2001 PPC 64 Team, IBM Corp
*/ #ifndef _ASM_POWERPC_PACA_H #define _ASM_POWERPC_PACA_H #ifdef __KERNEL__
#ifdefined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP) externunsignedint debug_smp_processor_id(void); /* from linux/smp.h */ /* * Add standard checks that preemption cannot occur when using get_paca(): * otherwise the paca_struct it points to may be the wrong one just after.
*/ #define get_paca() ((void) debug_smp_processor_id(), local_paca) #else #define get_paca() local_paca #endif
/* * Defines the layout of the paca. * * This structure is not directly accessed by firmware or the service * processor.
*/ struct paca_struct { #ifdef CONFIG_PPC_PSERIES /* * Because hw_cpu_id, unlike other paca fields, is accessed * routinely from other CPUs (from the IRQ code), we stick to * read-only (after boot) fields in the first cacheline to * avoid cacheline bouncing.
*/
struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */ #endif/* CONFIG_PPC_PSERIES */
/* * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c * load lock_token and paca_index with a single lwz * instruction. They must travel together and be properly * aligned.
*/ #ifdef __BIG_ENDIAN__
u16 lock_token; /* Constant 0x8000, used in locks */
u16 paca_index; /* Logical processor number */ #else
u16 paca_index; /* Logical processor number */
u16 lock_token; /* Constant 0x8000, used in locks */ #endif
#ifndef CONFIG_PPC_KERNEL_PCREL
u64 kernel_toc; /* Kernel TOC address */ #endif
u64 kernelbase; /* Base address of kernel */
u64 kernel_msr; /* MSR while running in kernel */ void *emergency_sp; /* pointer to emergency stack */
u64 data_offset; /* per cpu data offset */
s16 hw_cpu_id; /* Physical processor number */
u8 cpu_start; /* At startup, processor spins until */ /* this becomes non-zero. */
u8 kexec_state; /* set when kexec down has irqs off */ #ifdef CONFIG_PPC_BOOK3S_64 #ifdef CONFIG_PPC_64S_HASH_MMU struct slb_shadow *slb_shadow_ptr; #endif struct dtl_entry *dispatch_log; struct dtl_entry *dispatch_log_end; #endif
u64 dscr_default; /* per-CPU default DSCR */
#ifdef CONFIG_PPC_BOOK3S_64 /* * Now, starting in cacheline 2, the exception save areas
*/ /* used for most interrupts/exceptions */
u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
#ifdef CONFIG_PPC_BOOK3E_64
u64 exgen[8] __aligned(0x40); /* Keep pgd in the same cacheline as the start of extlb */
pgd_t *pgd __aligned(0x40); /* Current PGD */
pgd_t *kernel_pgd; /* Kernel PGD */
/* Shared by all threads of a core -- points to tcd of first thread */ struct tlb_core_data *tcd_ptr;
/* * We can have up to 3 levels of reentrancy in the TLB miss handler, * in each of four exception levels (normal, crit, mcheck, debug).
*/
u64 extlb[12][EX_TLB_SIZE / sizeof(u64)];
u64 exmc[8]; /* used for machine checks */
u64 excrit[8]; /* used for crit interrupts */
u64 exdbg[8]; /* used for debug interrupts */
/* Kernel stack pointers for use by special exceptions */ void *mc_kstack; void *crit_kstack; void *dbg_kstack;
/* * then miscellaneous read-write fields
*/ struct task_struct *__current; /* Pointer to current */
u64 kstack; /* Saved Kernel stack addr */
u64 saved_r1; /* r1 save for RTAS calls or PM or EE=0 */
u64 saved_msr; /* MSR saved here by enter_rtas */
u64 exit_save_r1; /* Syscall/interrupt R1 save */ #ifdef CONFIG_PPC_BOOK3E_64
u16 trap_save; /* Used when bad stack is encountered */ #endif #ifdef CONFIG_PPC_BOOK3S_64
u8 hsrr_valid; /* HSRRs set for HRFID */
u8 srr_valid; /* SRRs set for RFID */ #endif
u8 irq_soft_mask; /* mask for irq soft masking */
u8 irq_happened; /* irq happened while soft-disabled */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
u8 pmcregs_in_use; /* pseries puts this in lppaca */ #endif
u64 sprg_vdso; /* Saved user-visible sprg */ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
u64 tm_scratch; /* TM scratch area for reclaim */ #endif
#ifdef CONFIG_PPC_POWERNV /* PowerNV idle fields */ /* PNV_CORE_IDLE_* bits, all siblings work on thread 0 paca */ unsignedlong idle_lock; /* A value of 1 means acquired */ unsignedlong idle_state; union { /* P7/P8 specific fields */ struct { /* PNV_THREAD_RUNNING/NAP/SLEEP */
u8 thread_idle_state; /* Mask to denote subcore sibling threads */
u8 subcore_sibling_mask;
};
/* P9 specific fields */ struct { #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* The PSSCR value that the kernel requested before going to stop */
u64 requested_psscr; /* Flag to request this thread not to stop */
atomic_t dont_stop; #endif
};
}; #endif
#ifdef CONFIG_PPC_BOOK3S_64 /* Non-maskable exceptions that are not performance critical */
u64 exnmi[EX_SIZE]; /* used for system reset (nmi) */
u64 exmc[EX_SIZE]; /* used for machine checks */ /* Exclusive stacks for system reset and machine check exception. */ void *nmi_emergency_sp; void *mc_emergency_sp;
u16 in_nmi; /* In nmi handler */
/* * Flag to check whether we are in machine check early handler * and already using emergency stack.
*/
u16 in_mce;
u8 hmi_event_available; /* HMI event is available */
u8 hmi_p9_special_emu; /* HMI P9 special emulation */
u32 hmi_irqs; /* HMI irq stat */ #endif
u8 ftrace_enabled; /* Hard disable ftrace */
/* Stuff for accurate time accounting */ struct cpu_accounting_data accounting;
u64 dtl_ridx; /* read index in dispatch log */ struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */
#ifdef CONFIG_KVM_BOOK3S_HANDLER #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE /* We use this to store guest state in */ struct kvmppc_book3s_shadow_vcpu shadow_vcpu; #endif struct kvmppc_host_state kvm_hstate; #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE /* * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for * more details
*/ struct sibling_subcore_state *sibling_subcore_state; #endif #endif #ifdef CONFIG_PPC_BOOK3S_64 /* * rfi fallback flush must be in its own cacheline to prevent * other paca data leaking into the L1d
*/
u64 exrfi[EX_SIZE] __aligned(0x80); void *rfi_flush_fallback_area;
u64 l1d_flush_size; #endif #ifdef CONFIG_PPC_PSERIES
u8 *mce_data_buf; /* buffer to hold per cpu rtas errlog */ #endif/* CONFIG_PPC_PSERIES */
/*
 * NOTE(review): the German text below is website boilerplate that leaked in
 * during extraction ("the information on this website was carefully compiled
 * to the best of our knowledge; however neither completeness, correctness nor
 * quality is guaranteed; the syntax colouring and the measurement are still
 * experimental"). It is not part of this header and should be removed once
 * confirmed; presumably the original file continues here with the remaining
 * paca fields, the closing "};" of struct paca_struct and the matching
 * #endif guards — verify against the upstream source.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereit gestellten Informationen
 * zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */