if (RC_CHK_GET(result, map))
refcount_inc(perf_cpu_map__refcnt(map));
return result;
}
/*
 * Drop one reference to @map. When the last reference is released the
 * map is destroyed via cpu_map__delete(); otherwise only the reference-
 * count checking bookkeeping is updated. NULL is a no-op.
 */
void perf_cpu_map__put(struct perf_cpu_map *map)
{
	if (!map)
		return;

	if (refcount_dec_and_test(perf_cpu_map__refcnt(map)))
		cpu_map__delete(map);
	else
		RC_CHK_PUT(map);
}
staticstruct perf_cpu_map *cpu_map__new_sysconf(void)
{ struct perf_cpu_map *cpus; int nr_cpus, nr_cpus_conf;
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); if (nr_cpus < 0) return NULL;
nr_cpus_conf = sysconf(_SC_NPROCESSORS_CONF); if (nr_cpus != nr_cpus_conf) {
pr_warning("Number of online CPUs (%d) differs from the number configured (%d) the CPU map will only cover the first %d CPUs.",
nr_cpus, nr_cpus_conf, nr_cpus);
}
cpus = perf_cpu_map__alloc(nr_cpus); if (cpus != NULL) { int i;
for (i = 0; i < nr_cpus; ++i)
RC_CHK_ACCESS(cpus)->map[i].cpu = i;
}
if (!cpu_list) return perf_cpu_map__new_online_cpus();
/* * must handle the case of empty cpumap to cover * TOPOLOGY header for NUMA nodes with no CPU * ( e.g., because of CPU hotplug)
*/ if (!isdigit(*cpu_list) && *cpu_list != '\0') goto out;
WARN_ONCE(end_cpu >= MAX_NR_CPUS, "Perf can support %d CPUs. " "Consider raising MAX_NR_CPUS\n", MAX_NR_CPUS);
for (; start_cpu <= end_cpu; start_cpu++) { /* check for duplicates */ for (i = 0; i < nr_cpus; i++) if (tmp_cpus[i].cpu == (int16_t)start_cpu) goto invalid;
/** Is 'b' a subset of 'a'. */ bool perf_cpu_map__is_subset(conststruct perf_cpu_map *a, conststruct perf_cpu_map *b)
{ if (a == b || !b) returntrue; if (!a || __perf_cpu_map__nr(b) > __perf_cpu_map__nr(a)) returnfalse;
for (int i = 0, j = 0; i < __perf_cpu_map__nr(a); i++) { if (__perf_cpu_map__cpu(a, i).cpu > __perf_cpu_map__cpu(b, j).cpu) returnfalse; if (__perf_cpu_map__cpu(a, i).cpu == __perf_cpu_map__cpu(b, j).cpu) {
j++; if (j == __perf_cpu_map__nr(b)) returntrue;
}
} returnfalse;
}
/* * Merge two cpumaps. * * If 'other' is subset of '*orig', '*orig' keeps itself with no reference count * change (similar to "realloc"). * * If '*orig' is subset of 'other', '*orig' reuses 'other' with its reference * count increased. * * Otherwise, '*orig' gets freed and replaced with a new map.
*/ int perf_cpu_map__merge(struct perf_cpu_map **orig, struct perf_cpu_map *other)
{ struct perf_cpu *tmp_cpus; int tmp_len; int i, j, k; struct perf_cpu_map *merged;
if (perf_cpu_map__is_subset(*orig, other)) return 0; if (perf_cpu_map__is_subset(other, *orig)) {
perf_cpu_map__put(*orig);
*orig = perf_cpu_map__get(other); return 0;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.