/*
 * CPU vendor IDs
 *
 * Define as bits because they're used for vendor_specific bitmask in
 * the struct resctrl_test.
 */
#define ARCH_INTEL	1
#define ARCH_AMD	2

/* Sentinel return value: the test sequence has no more entries. */
#define END_OF_TESTS	1

/* Maximum number of argv entries for a user-supplied benchmark command. */
#define BENCHMARK_ARGS	64

/* Smallest buffer span (in bytes) used by the default benchmark. */
#define MINIMUM_SPAN	(250 * MB)

/*
 * Memory bandwidth (in MiB) below which the bandwidth comparisons
 * between iMC and resctrl are considered unreliable. For example RAS
 * features or memory performance features that generate memory traffic
 * may drive accesses that are counted differently by performance counters
 * and MBM respectively, for instance generating "overhead" traffic which
 * is not counted against any specific RMID.
 */
#define THROTTLE_THRESHOLD	750
/*
 * fill_buf_param:	"fill_buf" benchmark parameters
 * @buf_size:		Size (in bytes) of buffer used in benchmark.
 *			"fill_buf" allocates and initializes buffer of
 *			@buf_size. User can change value via command line.
 * @memflush:		If false the buffer will not be flushed after
 *			allocation and initialization, otherwise the
 *			buffer will be flushed. User can change value via
 *			command line (via integers with 0 interpreted as
 *			false and anything else as true).
 */
struct fill_buf_param {
	size_t		buf_size;
	bool		memflush;
};
/*
 * user_params:		User supplied parameters
 * @cpu:		CPU number to which the benchmark will be bound to
 * @bits:		Number of bits used for cache allocation size
 * @benchmark_cmd:	Benchmark command to run during (some of the) tests
 * @fill_buf:		Pointer to user provided parameters for "fill_buf",
 *			NULL if user did not provide parameters and test
 *			specific defaults should be used.
 */
struct user_params {
	int cpu;
	int bits;
	const char *benchmark_cmd[BENCHMARK_ARGS];
	const struct fill_buf_param *fill_buf;
};
/*
 * resctrl_test:	resctrl test definition
 * @name:		Test name
 * @group:		Test group - a common name for tests that share some
 *			characteristic (e.g., L3 CAT test belongs to the CAT
 *			group). Can be NULL
 * @resource:		Resource to test (e.g., MB, L3, L2, etc.)
 * @vendor_specific:	Bitmask for vendor-specific tests (can be 0 for
 *			universal tests)
 * @disabled:		Test is disabled
 * @feature_check:	Callback to check required resctrl features
 * @run_test:		Callback to run the test
 * @cleanup:		Callback to cleanup after the test
 */
struct resctrl_test {
	const char	*name;
	const char	*group;
	const char	*resource;
	unsigned int	vendor_specific;
	bool		disabled;
	bool		(*feature_check)(const struct resctrl_test *test);
	int		(*run_test)(const struct resctrl_test *test,
				    const struct user_params *uparams);
	void		(*cleanup)(void);
};
/*
 * resctrl_val_param:	resctrl test parameters
 * @ctrlgrp:		Name of the control monitor group (con_mon grp)
 * @mongrp:		Name of the monitor group (mon grp)
 * @filename:		Name of file to which the o/p should be written
 * @mask:		Cache allocation bit mask used by the test
 * @num_of_runs:	Number of times the measurement has been performed
 * @init:		Callback function to initialize test environment
 * @setup:		Callback function to setup per test run environment
 * @measure:		Callback that performs the measurement (a single test)
 * @fill_buf:		Parameters for default "fill_buf" benchmark.
 *			Initialized with user provided parameters, possibly
 *			adapted to be relevant to the test. If user does
 *			not provide parameters for "fill_buf" nor a
 *			replacement benchmark then initialized with defaults
 *			appropriate for test. NULL if user provided
 *			benchmark.
 */
struct resctrl_val_param {
	const char	*ctrlgrp;
	const char	*mongrp;
	char		filename[64];
	unsigned long	mask;
	int		num_of_runs;
	int		(*init)(const struct resctrl_val_param *param,
				int domain_id);
	int		(*setup)(const struct resctrl_test *test,
				 const struct user_params *uparams,
				 struct resctrl_val_param *param);
	int		(*measure)(const struct user_params *uparams,
				   struct resctrl_val_param *param,
				   pid_t bm_pid);
	struct fill_buf_param	*fill_buf;
};
/*
 * perf_event_read: layout of a perf read() result with
 * PERF_FORMAT_GROUP for a group of up to two events.
 */
struct perf_event_read {
	__u64 nr;			/* The number of events */
	struct {
		__u64 value;		/* The value of the event */
	} values[2];
};
/*
 * Memory location that consumes values compiler must not optimize away.
 * Volatile ensures writes to this location cannot be optimized away by
 * compiler.
 */
extern volatile int *value_sink;

/* Non-zero when Sub-NUMA Clustering detection is unreliable on this system. */
extern int snc_unreliable;

/* Path of the llc_occupancy file for the monitor group under test. */
extern char llc_occup_path[1024];
int snc_nodes_per_l3_cache(void); int get_vendor(void); bool check_resctrlfs_support(void); int filter_dmesg(void); int get_domain_id(constchar *resource, int cpu_no, int *domain_id); int mount_resctrlfs(void); int umount_resctrlfs(void); bool resctrl_resource_exists(constchar *resource); bool resctrl_mon_feature_exists(constchar *resource, constchar *feature); bool resource_info_file_exists(constchar *resource, constchar *file); bool test_resource_feature_check(conststruct resctrl_test *test); char *fgrep(FILE *inf, constchar *str); int taskset_benchmark(pid_t bm_pid, int cpu_no, cpu_set_t *old_affinity); int taskset_restore(pid_t bm_pid, cpu_set_t *old_affinity); int write_schemata(constchar *ctrlgrp, char *schemata, int cpu_no, constchar *resource); int write_bm_pid_to_resctrl(pid_t bm_pid, constchar *ctrlgrp, constchar *mongrp); int perf_event_open(struct perf_event_attr *hw_event, pid_t pid, int cpu, int group_fd, unsignedlong flags); unsignedchar *alloc_buffer(size_t buf_size, bool memflush); void mem_flush(unsignedchar *buf, size_t buf_size); void fill_cache_read(unsignedchar *buf, size_t buf_size, bool once);
ssize_t get_fill_buf_size(int cpu_no, constchar *cache_type); int initialize_read_mem_bw_imc(void); int measure_read_mem_bw(conststruct user_params *uparams, struct resctrl_val_param *param, pid_t bm_pid); void initialize_mem_bw_resctrl(conststruct resctrl_val_param *param, int domain_id); int resctrl_val(conststruct resctrl_test *test, conststruct user_params *uparams, struct resctrl_val_param *param); unsignedlong create_bit_mask(unsignedint start, unsignedint len); unsignedint count_contiguous_bits(unsignedlong val, unsignedint *start); int get_full_cbm(constchar *cache_type, unsignedlong *mask); int get_mask_no_shareable(constchar *cache_type, unsignedlong *mask); int get_cache_size(int cpu_no, constchar *cache_type, unsignedlong *cache_size); int resource_info_unsigned_get(constchar *resource, constchar *filename, unsignedint *val); void ctrlc_handler(int signum, siginfo_t *info, void *ptr); int signal_handler_register(conststruct resctrl_test *test); void signal_handler_unregister(void); unsignedint count_bits(unsignedlong n); int snc_kernel_support(void);
void perf_event_attr_initialize(struct perf_event_attr *pea, __u64 config); void perf_event_initialize_read_format(struct perf_event_read *pe_read); int perf_open(struct perf_event_attr *pea, pid_t pid, int cpu_no); int perf_event_reset_enable(int pe_fd); int perf_event_measure(int pe_fd, struct perf_event_read *pe_read, constchar *filename, pid_t bm_pid); int measure_llc_resctrl(constchar *filename, pid_t bm_pid); void show_cache_info(int no_of_bits, __u64 avg_llc_val, size_t cache_span, bool lines);
/*
 * cache_portion_size - Calculate the size of a cache portion
 * @cache_size:		Total cache size in bytes
 * @portion_mask:	Cache portion mask
 * @full_cache_mask:	Full Cache Bit Mask (CBM) for the cache
 *
 * Return: The size of the cache portion in bytes.
 */
static inline unsigned long cache_portion_size(unsigned long cache_size,
					       unsigned long portion_mask,
					       unsigned long full_cache_mask)
{
	unsigned int bits = count_bits(full_cache_mask);

	/*
	 * With no bits in the full CBM, assume the cache cannot be split
	 * into smaller portions. To avoid divide by zero, return cache_size.
	 */
	if (!bits)
		return cache_size;

	/* Portion size is proportional to the portion's share of CBM bits. */
	return cache_size * count_bits(portion_mask) / bits;
}