/*
 * Encode an eventsel+umask pair into event-select MSR format.  Note, this is
 * technically AMD's format, as Intel's format only supports 8 bits for the
 * event selector, i.e. doesn't use bits 24:16 for the selector.  But, OR-ing
 * in '0' is a nop and won't clobber the CMASK.
 *
 * The first term relocates eventsel bits 11:8 (0xf00) up to bits 35:32 of
 * the MSR (AMD's extended event-select field); for Intel events those bits
 * are zero, so the OR is harmless.
 *
 * Every macro argument is fully parenthesized: without the parens around
 * (eventsel) in the first term, an argument such as 'A | B' would bind as
 * 'A | (B & 0xf00UL)' because '&' has higher precedence than '|'.
 */
#define RAW_EVENT(eventsel, umask) ((((eventsel) & 0xf00UL) << 24) |	\
				    ((eventsel) & 0xff) |		\
				    (((umask) & 0xff) << 8))
/*
 * Fixed-function field layout of the event-select MSRs.  These are
 * technically Intel's definitions, but except for CMASK (see above),
 * AMD's layout is compatible with Intel's.
 */
#define ARCH_PERFMON_EVENTSEL_EVENT		GENMASK_ULL(7, 0)
#define ARCH_PERFMON_EVENTSEL_UMASK		GENMASK_ULL(15, 8)
#define ARCH_PERFMON_EVENTSEL_USR		BIT_ULL(16)
#define ARCH_PERFMON_EVENTSEL_OS		BIT_ULL(17)
#define ARCH_PERFMON_EVENTSEL_EDGE		BIT_ULL(18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL	BIT_ULL(19)
#define ARCH_PERFMON_EVENTSEL_INT		BIT_ULL(20)
#define ARCH_PERFMON_EVENTSEL_ANY		BIT_ULL(21)
#define ARCH_PERFMON_EVENTSEL_ENABLE		BIT_ULL(22)
#define ARCH_PERFMON_EVENTSEL_INV		BIT_ULL(23)
#define ARCH_PERFMON_EVENTSEL_CMASK		GENMASK_ULL(31, 24)
/*
 * Note! The order and thus the index of the architectural events matters as
 * support for each event is enumerated via CPUID using the index of the
 * event.  The indices are spelled out explicitly below to make any
 * accidental reordering show up as a textual diff.
 */
enum intel_pmu_architectural_events {
	INTEL_ARCH_CPU_CYCLES_INDEX		= 0,
	INTEL_ARCH_INSTRUCTIONS_RETIRED_INDEX	= 1,
	INTEL_ARCH_REFERENCE_CYCLES_INDEX	= 2,
	INTEL_ARCH_LLC_REFERENCES_INDEX		= 3,
	INTEL_ARCH_LLC_MISSES_INDEX		= 4,
	INTEL_ARCH_BRANCHES_RETIRED_INDEX	= 5,
	INTEL_ARCH_BRANCHES_MISPREDICTED_INDEX	= 6,
	INTEL_ARCH_TOPDOWN_SLOTS_INDEX		= 7,

	/* Number of architectural events, not itself a valid index. */
	NR_INTEL_ARCH_EVENTS			= 8,
};