def equal(self, other):
    """Return True when *self* and *other* carry equal values for every
    term in Event.terms; False as soon as a term is missing or differs.

    Both operands are dict-like event records indexed by term name.
    """
    for t in Event.terms:
        # Guard before logging/indexing: the original logged self[t]/other[t]
        # first, which raised KeyError for a missing term instead of
        # returning False.
        if t not in self or t not in other:
            return False
        log.debug(" [%s] %s %s" % (t, self[t], other[t]))
        if not data_equal(self[t], other[t]):
            return False
    return True
def optional(self):
    """Return True when this event record is marked optional
    (its 'optional' term is present and set to the string '1')."""
    # Single boolean expression replaces the if/return-True/return-False chain.
    return 'optional' in self and self['optional'] == '1'
def diff(self, other):
    """Log a warning for every term present in both records whose values
    differ. Terms missing from either side are skipped (membership
    mismatches are reported by equal(), not here)."""
    for t in Event.terms:
        if t not in self or t not in other:
            continue
        if not data_equal(self[t], other[t]):
            log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
def parse_version(version):
    """Parse a dotted version string into a list of its first two
    numeric components, e.g. "6.1.52" -> [6, 1].

    Returns None for a falsy (empty/None) input. Only the first two
    components are kept; a single-component string yields a one-element
    list. Raises ValueError if a kept component is not an integer.
    """
    if not version:
        return None
    return [int(v) for v in version.split(".")[0:2]]
# Test file description needs to have following sections:
# [config]
#   - just single instance in file
#   - needs to specify:
#     'command'  - perf command name
#     'args'     - special command arguments
#     'ret'      - Skip test if Perf doesn't exit with this value (0 by default)
#     'test_ret' - If set to 'true', fail test instead of skipping for 'ret' argument
#     'arch'     - architecture specific test (optional)
#                  comma separated list, ! at the beginning negates it.
#     'auxv'     - Truthy statement that is evaled in the scope of the auxv map.
#                  When false, the test is skipped.
#                  For example 'auxv["AT_HWCAP"] == 10'. (optional)
#     'kernel_since' - Inclusive kernel version from which the test will start
#                      running. Only the first two values are supported,
#                      for example "6.1" (optional)
#     'kernel_until' - Exclusive kernel version from which the test will stop
#                      running. (optional)
# [eventX:base]
#   - one or multiple instances in file
#   - expected values assignments
class Test(object):
    def __init__(self, path, options):
        """Load the test description file at *path*.

        NOTE(review): only the parser setup is visible in this chunk;
        the rest of __init__ (storing path/options and the parsed
        [config] values) appears truncated — confirm against the full file.
        """
        parser = configparser.ConfigParser()
        parser.read(path)
def skip_test_auxv(self):
    """Return True when the test's 'auxv' condition says to skip.

    Builds a map of the current process auxiliary vector (via
    LD_SHOW_AUXV=1) and evaluates the configured truthy statement in
    its scope; the test is skipped when that statement is false.
    Returns False immediately when no 'auxv' condition is configured.
    """
    def new_auxv(a, pattern):
        items = list(filter(None, pattern.split(a)))
        # AT_HWCAP is hex but doesn't have a prefix, so special case it
        if items[0] == "AT_HWCAP":
            value = int(items[-1], 16)
        else:
            try:
                value = int(items[-1], 0)
            except ValueError:
                # Non-numeric values (e.g. path strings) are kept verbatim.
                value = items[-1]
        return (items[0], value)

    if not self.auxv:
        return False

    auxv = subprocess.check_output("LD_SHOW_AUXV=1 sleep 0", shell=True) \
           .decode(sys.stdout.encoding)
    pattern = re.compile(r"[: ]+")
    auxv = dict([new_auxv(a, pattern) for a in auxv.splitlines()])
    # NOTE(review): eval() of the config-supplied expression — acceptable
    # only because test descriptions are trusted local files.
    return not eval(self.auxv)
def skip_test_arch(self, myarch):
    """Return True when this test must be skipped on architecture *myarch*.

    self.arch is a comma separated list of architectures; a leading '!'
    negates the whole list (run everywhere except the listed arches).
    An empty self.arch means the test runs on every architecture.
    """
    # If architecture not set always run test
    if self.arch == '':
        return False

    # Allow multiple values in assignment separated by ','
    arch_list = self.arch.split(',')

    # Handle negated list such as !s390x,ppc: skip when myarch IS listed.
    if arch_list[0][0] == '!':
        arch_list[0] = arch_list[0][1:]
        log.warning("excluded architecture list %s" % arch_list)
        for arch_item in arch_list:
            if arch_item == myarch:
                return True
        return False

    # Plain list: skip when myarch is NOT listed.
    for arch_item in arch_list:
        if arch_item == myarch:
            return False
    return True
def restore_sample_rate(self, value=10000):
    """Raise perf_event_max_sample_rate back to at least *value*.

    Best-effort: any I/O, value, or type problem is logged as a warning
    and swallowed so a restricted environment does not fail the test run.
    """
    try:
        # Check value of sample_rate
        with open("/proc/sys/kernel/perf_event_max_sample_rate", "r") as fIn:
            curr_value = fIn.readline()
        # If too low restore to reasonable value
        if not curr_value or int(curr_value) < int(value):
            with open("/proc/sys/kernel/perf_event_max_sample_rate", "w") as fOut:
                fOut.write(str(value))
    except IOError as e:
        log.warning("couldn't restore sample_rate value: I/O error %s" % e)
    except ValueError as e:
        log.warning("couldn't restore sample_rate value: Value error %s" % e)
    except TypeError as e:
        log.warning("couldn't restore sample_rate value: Type error %s" % e)
# NOTE(review): the lines below are a whitespace-mangled fragment of (at
# least) two routines — an event-section loading loop and an
# expectation/result comparison routine — whose `def` headers are not
# visible in this chunk. Left byte-identical; presumably `match` is a dict
# initialized in the missing header code — TODO confirm against full file.
# The event record section header contains 'event' word, # optionaly followed by ':' allowing to load 'parent # event' first as a base for section in filter(self.is_event, parser_event.sections()):
# For each expected event find all matching # events in result. Fail if there's not any. for exp_name, exp_event in expect.items():
exp_list = []
res_event = {}
log.debug("  matching [%s]" % exp_name) for res_name, res_event in result.items():
log.debug("    to [%s]" % res_name) if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug("    ->OK") else:
log.debug("    ->FAIL");
# we did not any matching event - fail ifnot exp_list: if exp_event.optional():
log.debug("    %s does not match, but is optional" % exp_name) else: ifnot res_event:
log.debug("    res_event is empty"); else:
exp_event.diff(res_event) raise Fail(self, 'match failure');
match[exp_name] = exp_list
# NOTE(review): group check — every matched result event must belong to
# the same group as its expected event's group leader.
# For each defined group in the expected events # check we match the same group in the result. for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''): continue
for res_name in match[exp_name]:
res_group = result[res_name].group if res_group notin match[group]: raise Fail(self, 'group failure')
log.debug("  group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug("  matched")
def resolve_groups(self, events):
    """Resolve each event's numeric 'group_fd' into its group leader's name.

    For every event whose 'group_fd' is not '-1', find the event whose
    'fd' equals that group_fd and store that event's name in the
    member's .group attribute. Events with group_fd == '-1' (no group)
    are left untouched.
    """
    for name, event in events.items():
        group_fd = event['group_fd']
        if group_fd == '-1':
            continue
        for iname, ievent in events.items():
            if ievent['fd'] == group_fd:
                event.group = iname
                log.debug('[%s] has group leader [%s]' % (name, iname))
                break
def run(self):
    """Execute the test: run the perf command, load the produced event
    records, resolve group leaders, and match expectations against
    results in both directions.

    The temporary working directory is always removed, even when the
    command or the comparison raises.
    """
    tempdir = tempfile.mkdtemp()
    try:
        # run the test script
        self.run_cmd(tempdir)

        # load events expectation for the test
        log.debug(" loading result events")
        for f in glob.glob(tempdir + '/event*'):
            self.load_events(f, self.result)

        # resolve group_fd to event names
        self.resolve_groups(self.expect)
        self.resolve_groups(self.result)

        # do the expectation - results matching - both ways
        self.compare(self.expect, self.result)
        self.compare(self.result, self.expect)
    finally:
        # cleanup
        shutil.rmtree(tempdir)
def run_tests(options):
    """Run every test description matching options.test under
    options.test_dir.

    Unsupported (Unsup) and skipped (Notest) tests are logged as
    warnings and do not abort the run; other exceptions propagate.
    """
    for f in glob.glob(options.test_dir + '/' + options.test):
        try:
            Test(f, options).run()
        except Unsup as obj:
            log.warning("unsupp %s" % obj.getMsg())
        except Notest as obj:
            log.warning("skipped %s" % obj.getMsg())
def setup_log(verbose):
    """Map the verbosity count to a logging level for the global logger.

    0 -> CRITICAL, 1 -> WARNING, 2 -> INFO, 3+ -> DEBUG.

    NOTE(review): only the level selection is visible in this chunk; the
    code that actually creates and configures the global 'log' logger
    appears truncated — confirm against the full file.
    """
    global log
    level = logging.CRITICAL
    if verbose == 1:
        level = logging.WARNING
    if verbose == 2:
        level = logging.INFO
    if verbose >= 3:
        level = logging.DEBUG
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.