/*
 * struct storm_bank - per-bank state for CMCI storm tracking.
 *
 * history:		Bitmask tracking errors occurrence. Each set bit
 *			represents an error seen.
 * timestamp:		Last time (in jiffies) that the bank was polled.
 * in_storm_mode:	Is this bank in storm mode?
 * poll_only:		Bank does not support CMCI, skip storm tracking.
 */
struct storm_bank {
	u64 history;
	u64 timestamp;
	bool in_storm_mode;
	bool poll_only;
};
/* How many errors within the history buffer mark the start of a storm. */
#define STORM_BEGIN_THRESHOLD	5

/*
 * How many polls of machine check bank without an error before declaring
 * the storm is over. Since it is tracked by the bitmasks in the history
 * field of struct storm_bank the mask is 30 bits [0 ... 29].
 */
#define STORM_END_POLL_THRESHOLD	29
/*
 * struct mca_storm_desc - per-CPU CMCI storm bookkeeping.
 *
 * banks:		per-cpu, per-bank details
 * stormy_bank_count:	count of MC banks in storm state
 * poll_mode:		CPU is in poll mode
 */
struct mca_storm_desc {
	struct storm_bank	banks[MAX_NR_BANKS];
	u8			stormy_bank_count;
	bool			poll_mode;
};
/* * We consider records to be equivalent if bank+status+addr+misc all match. * This is only used when the system is going down because of a fatal error * to avoid cluttering the console log with essentially repeated information. * In normal processing all errors seen are logged.
*/ staticinlinebool mce_cmp(struct mce *m1, struct mce *m2)
{ return m1->bank != m2->bank ||
m1->status != m2->status ||
m1->addr != m2->addr ||
m1->misc != m2->misc;
}
/*
 * struct mce_vendor_flags - vendor-specific MCA capability bits, cached
 * at boot so hot paths do not have to re-query CPUID.
 */
struct mce_vendor_flags {
	/*
	 * Indicates that overflow conditions are not fatal, when set.
	 */
	__u64 overflow_recov	: 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	succor			: 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
	 * the register space for each MCA bank and also increases number of
	 * banks. Also, to accommodate the new banks and registers, the MCA
	 * register space is moved to a new MSR range.
	 */
	smca			: 1,

	/* Skylake, Cascade Lake, Cooper Lake REP;MOVS* quirk */
	skx_repmov_quirk	: 1,

	__reserved_0		: 55;
};

extern struct mce_vendor_flags mce_flags;
/* Per-bank control/configuration state. */
struct mce_bank {
	/* subevents to enable */
	u64 ctl;

	/* initialise bank? */
	__u64 init		: 1,

	/*
	 * (AMD) MCA_CONFIG[McaLsbInStatusSupported]: When set, this bit indicates
	 * the LSB field is found in MCA_STATUS and not in MCA_ADDR.
	 */
	lsb_in_status		: 1,
	/*
	 * NOTE(review): this declaration appears truncated in this chunk —
	 * the remaining bitfield(s) and the closing brace are not visible here.
	 */
/* Decide whether to add MCE record to MCE event pool or filter it out. */ externbool filter_mce(struct mce *m); void mce_prep_record_common(struct mce *m); void mce_prep_record_per_cpu(unsignedint cpu, struct mce *m);
/*
 * If MCA_CONFIG[McaLsbInStatusSupported] is set, extract ErrAddr in bits
 * [56:0] of MCA_STATUS, else in bits [55:0] of MCA_ADDR.
 */
static __always_inline void smca_extract_err_addr(struct mce *m)
{
	u8 lsb;

	/* Address extraction only applies on Scalable MCA systems. */
	if (!mce_flags.smca)
		return;

	if (this_cpu_ptr(mce_banks_array)[m->bank].lsb_in_status) {
		/* LSB field occupies MCA_STATUS bits [29:24]. */
		lsb = (m->status >> 24) & 0x3f;
		/*
		 * NOTE(review): function body appears truncated in this
		 * chunk — the address masking/shift and the else branch
		 * are not visible here.
		 */
/* Return the MSR address of register @reg for machine check bank @bank. */
static __always_inline u32 mca_msr_reg(int bank, enum mca_msr reg)
{
	/* Scalable MCA CPUs use a relocated MSR range for the MCA registers. */
	if (cpu_feature_enabled(X86_FEATURE_SMCA)) {
		switch (reg) {
		case MCA_CTL:	 return MSR_AMD64_SMCA_MCx_CTL(bank);
		case MCA_ADDR:	 return MSR_AMD64_SMCA_MCx_ADDR(bank);
		case MCA_MISC:	 return MSR_AMD64_SMCA_MCx_MISC(bank);
		case MCA_STATUS: return MSR_AMD64_SMCA_MCx_STATUS(bank);
		}
	}

	/* Legacy MSR layout. */
	switch (reg) {
	case MCA_CTL:	 return MSR_IA32_MCx_CTL(bank);
	case MCA_ADDR:	 return MSR_IA32_MCx_ADDR(bank);
	case MCA_MISC:	 return MSR_IA32_MCx_MISC(bank);
	case MCA_STATUS: return MSR_IA32_MCx_STATUS(bank);
	}
	/*
	 * NOTE(review): function appears truncated in this chunk — the
	 * fallback return and closing brace are not visible here.
	 */
/*
 * NOTE(review): the following German website disclaimer is text-extraction
 * residue, not part of this source file; preserved here as a comment so the
 * file remains valid C:
 * "Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch
 * Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell."
 */