/* * Locking/sorting note: * * Sorting is done with the write lock, iteration and binary searching happens * under the read lock requiring being sorted. There is a race between sorting * releasing the write lock and acquiring the read lock for iteration/searching * where another thread could insert and break the sorting of the maps. In * practice inserting maps should be rare meaning that the race shouldn't lead * to live lock. Removal of maps doesn't break being sorted.
*/
DECLARE_RC_STRUCT(maps) { struct rw_semaphore lock; /** * @maps_by_address: array of maps sorted by their starting address if * maps_by_address_sorted is true.
*/ struct map **maps_by_address; /** * @maps_by_name: optional array of maps sorted by their dso name if * maps_by_name_sorted is true.
*/ struct map **maps_by_name; struct machine *machine; #ifdef HAVE_LIBUNWIND_SUPPORT void *addr_space; conststruct unwind_libunwind_ops *unwind_libunwind_ops; #endif
refcount_t refcnt; /** * @nr_maps: number of maps_by_address, and possibly maps_by_name, * entries that contain maps.
*/ unsignedint nr_maps; /** * @nr_maps_allocated: number of entries in maps_by_address and possibly * maps_by_name.
*/ unsignedint nr_maps_allocated; /** * @last_search_by_name_idx: cache of last found by name entry's index * as frequent searches for the same dso name are common.
*/ unsignedint last_search_by_name_idx; /** @maps_by_address_sorted: is maps_by_address sorted. */ bool maps_by_address_sorted; /** @maps_by_name_sorted: is maps_by_name sorted. */ bool maps_by_name_sorted; /** @ends_broken: does the map contain a map where end values are unset/unsorted? */ bool ends_broken;
};
staticvoid check_invariants(conststruct maps *maps __maybe_unused)
{ #ifndef NDEBUG
assert(RC_CHK_ACCESS(maps)->nr_maps <= RC_CHK_ACCESS(maps)->nr_maps_allocated); for (unsignedint i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) { struct map *map = RC_CHK_ACCESS(maps)->maps_by_address[i];
/* Check map is well-formed. */
assert(map__end(map) == 0 || map__start(map) <= map__end(map)); /* Expect at least 1 reference count. */
assert(refcount_read(map__refcnt(map)) > 0);
if (map__dso(map) && dso__kernel(map__dso(map)))
assert(RC_CHK_EQUAL(map__kmap(map)->kmaps, maps));
if (i > 0) { struct map *prev = RC_CHK_ACCESS(maps)->maps_by_address[i - 1];
/* If addresses are sorted... */ if (RC_CHK_ACCESS(maps)->maps_by_address_sorted) { /* Maps should be in start address order. */
assert(map__start(prev) <= map__start(map)); /* * If the ends of maps aren't broken (during * construction) then they should be ordered * too.
*/ if (!RC_CHK_ACCESS(maps)->ends_broken) {
assert(map__end(prev) <= map__end(map));
assert(map__end(prev) <= map__start(map) ||
map__start(prev) == map__start(map));
}
}
}
} if (RC_CHK_ACCESS(maps)->maps_by_name) { for (unsignedint i = 0; i < RC_CHK_ACCESS(maps)->nr_maps; i++) { struct map *map = RC_CHK_ACCESS(maps)->maps_by_name[i];
/* * Maps by name maps should be in maps_by_address, so * the reference count should be higher.
*/
assert(refcount_read(map__refcnt(map)) > 1);
}
} #endif
}
/* Not in the header, to aid reference counting. */ staticstruct map **maps__maps_by_name(conststruct maps *maps)
{ return RC_CHK_ACCESS(maps)->maps_by_name;
for (unsignedint i = 0; i < maps__nr_maps(maps); i++) {
map__zput(maps_by_address[i]); if (maps_by_name)
map__zput(maps_by_name[i]);
}
zfree(&maps_by_address);
zfree(&maps_by_name);
unwind__finish_access(maps);
}
/*
 * Drop the optional by-name array: put each held map reference, free the
 * array itself and mark the by-name view unsorted. No-op when the array was
 * never built. Caller must hold the write lock.
 */
static void __maps__free_maps_by_name(struct maps *maps)
{
	if (!maps__maps_by_name(maps))
		return;

	/*
	 * Free everything to try to do it from the rbtree in the next search
	 */
	for (unsigned int i = 0; i < maps__nr_maps(maps); i++)
		map__put(maps__maps_by_name(maps)[i]);

	zfree(&RC_CHK_ACCESS(maps)->maps_by_name);

	/* Consistent with maps__init(). When maps_by_name == NULL, maps_by_name_sorted == false */
	maps__set_maps_by_name_sorted(maps, false);
}
maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new)); if (!maps_by_address) return -ENOMEM;
maps__set_maps_by_address(maps, maps_by_address); if (maps_by_name) {
maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new)); if (!maps_by_name) { /* * If by name fails, just disable by name and it will * recompute next time it is required.
*/
__maps__free_maps_by_name(maps);
}
maps__set_maps_by_name(maps, maps_by_name);
}
RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
} /* Insert the value at the end. */
maps_by_address[nr_maps] = map__get(new);
map__set_kmap_maps(new, maps); if (maps_by_name)
maps_by_name[nr_maps] = map__get(new);
/* * Recompute if things are sorted. If things are inserted in a sorted * manner, for example by processing /proc/pid/maps, then no * sorting/resorting will be necessary.
*/ if (nr_maps == 1) { /* If there's just 1 entry then maps are sorted. */
maps__set_maps_by_address_sorted(maps, true);
maps__set_maps_by_name_sorted(maps, maps_by_name != NULL);
} else { /* Sorted if maps were already sorted and this map starts after the last one. */
maps__set_maps_by_address_sorted(maps,
maps__maps_by_address_sorted(maps) &&
map__end(maps_by_address[nr_maps - 2]) <= map__start(new));
maps__set_maps_by_name_sorted(maps, false);
} if (map__end(new) < map__start(new))
RC_CHK_ACCESS(maps)->ends_broken = true;
return 0;
}
/*
 * Insert @map into @maps, taking the write lock for the duration and
 * re-checking invariants before releasing it. Returns 0 on success or a
 * negative errno from __maps__insert().
 */
int maps__insert(struct maps *maps, struct map *map)
{
	int err;

	down_write(maps__lock(maps));
	err = __maps__insert(maps, map);
	check_invariants(maps);
	up_write(maps__lock(maps));

	return err;
}
int maps__for_each_map(struct maps *maps, int (*cb)(struct map *map, void *data), void *data)
{ bool done = false; int ret = 0;
/* See locking/sorting note. */ while (!done) {
down_read(maps__lock(maps)); if (maps__maps_by_address_sorted(maps)) { /* * maps__for_each_map callbacks may buggily/unsafely * insert into maps_by_address. Deliberately reload * maps__nr_maps and maps_by_address on each iteration * to avoid using memory freed by maps__insert growing * the array - this may cause maps to be skipped or * repeated.
*/ for (unsignedint i = 0; i < maps__nr_maps(maps); i++) { struct map **maps_by_address = maps__maps_by_address(maps); struct map *map = maps_by_address[i];
ret = cb(map, data); if (ret) break;
}
done = true;
}
up_read(maps__lock(maps)); if (!done)
maps__sort_by_address(maps);
} return ret;
}
/*
 * Find first map where end > map->start.
 * Same as find_vma() in kernel.
 */
static unsigned int first_ending_after(struct maps *maps, const struct map *map)
{
	struct map **maps_by_address = maps__maps_by_address(maps);
	int lo = 0, hi = (int)maps__nr_maps(maps) - 1;
	int first = hi + 1;	/* one-past-the-end when nothing matches */

	assert(maps__maps_by_address_sorted(maps));

	/* Fast path: the very first entry already ends after map's start. */
	if (lo <= hi && map__end(maps_by_address[0]) > map__start(map))
		return 0;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		struct map *pos = maps_by_address[mid];

		if (map__end(pos) <= map__start(map)) {
			/* Ends too early, answer lies to the right. */
			lo = mid + 1;
		} else {
			first = mid;
			/* Entry overlaps map; no earlier entry can. */
			if (map__start(pos) <= map__start(map))
				break;
			hi = mid - 1;
		}
	}
	return first;
}
maps_by_address = realloc(maps_by_address, nr_allocate * sizeof(new1)); if (!maps_by_address) return -ENOMEM;
maps__set_maps_by_address(maps, maps_by_address); if (maps_by_name) {
maps_by_name = realloc(maps_by_name, nr_allocate * sizeof(new1)); if (!maps_by_name) { /* * If by name fails, just disable by name and it will * recompute next time it is required.
*/
__maps__free_maps_by_name(maps);
}
maps__set_maps_by_name(maps, maps_by_name);
}
RC_CHK_ACCESS(maps)->nr_maps_allocated = nr_allocate;
}
memmove(&maps_by_address[first_after_index+to_add],
&maps_by_address[first_after_index],
(nr_maps - first_after_index) * sizeof(new1));
maps_by_address[first_after_index] = map__get(new1); if (maps_by_name)
maps_by_name[nr_maps] = map__get(new1); if (new2) {
maps_by_address[first_after_index + 1] = map__get(new2); if (maps_by_name)
maps_by_name[nr_maps + 1] = map__get(new2);
}
RC_CHK_ACCESS(maps)->nr_maps = nr_maps + to_add;
maps__set_maps_by_name_sorted(maps, false);
map__set_kmap_maps(new1, maps);
map__set_kmap_maps(new2, maps);
check_invariants(maps); return 0;
}
/*
 * Adds new to maps, if new overlaps existing entries then the existing maps are
 * adjusted or removed so that new fits without overlapping any entries.
 *
 * Fixes vs. previous version: declare 'next' (was used undeclared), check
 * map__clone() for NULL and route failures to out_err (the label existed but
 * nothing jumped to it), actually shorten/offset the 'before'/'after' clones
 * as the comments describe, and insert 'after' in the after-only branch
 * (it was previously cloned and then dropped).
 */
static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{
	int err = 0;
	FILE *fp = debug_file();
	unsigned int i, ni = INT_MAX; // Some gcc complain, but depends on maps_by_name...

	if (!maps__maps_by_address_sorted(maps))
		__maps__sort_by_address(maps);

	/*
	 * Iterate through entries where the end of the existing entry is
	 * greater-than the new map's start.
	 */
	for (i = first_ending_after(maps, new); i < maps__nr_maps(maps); ) {
		struct map **maps_by_address = maps__maps_by_address(maps);
		struct map **maps_by_name = maps__maps_by_name(maps);
		struct map *pos = maps_by_address[i];
		struct map *before = NULL, *after = NULL;

		/*
		 * Stop if current map starts after map->end.
		 * Maps are ordered by start: next will not overlap for sure.
		 */
		if (map__start(pos) >= map__end(new))
			break;

		if (use_browser) {
			pr_debug("overlapping maps in %s (disable tui for more info)\n",
				 dso__name(map__dso(new)));
		} else if (verbose >= 2) {
			pr_debug("overlapping maps:\n");
			map__fprintf(new, fp);
			map__fprintf(pos, fp);
		}

		if (maps_by_name)
			ni = maps__by_name_index(maps, pos);

		/*
		 * Now check if we need to create new maps for areas not
		 * overlapped by the new map:
		 */
		if (map__start(new) > map__start(pos)) {
			/* Map starts within existing map. Need to shorten the existing map. */
			before = map__clone(pos);
			if (before == NULL) {
				err = -ENOMEM;
				goto out_err;
			}
			map__set_end(before, map__start(new));

			if (verbose >= 2 && !use_browser)
				map__fprintf(before, fp);
		}
		if (map__end(new) < map__end(pos)) {
			/* The new map isn't as long as the existing map. */
			after = map__clone(pos);
			if (after == NULL) {
				map__zput(before);
				err = -ENOMEM;
				goto out_err;
			}
			/* Trim the clone to the region after 'new', keeping pgoff in sync. */
			map__set_start(after, map__end(new));
			map__add_pgoff(after, map__end(new) - map__start(pos));

			if (verbose >= 2 && !use_browser)
				map__fprintf(after, fp);
		}
		/*
		 * If adding one entry, for `before` or `after`, we can replace
		 * the existing entry. If both `before` and `after` are
		 * necessary than an insert is needed. If the existing entry
		 * entirely overlaps the existing entry it can just be removed.
		 */
		if (before) {
			map__put(maps_by_address[i]);
			maps_by_address[i] = before;
			map__set_kmap_maps(before, maps);

			if (maps_by_name) {
				map__put(maps_by_name[ni]);
				maps_by_name[ni] = map__get(before);
			}

			/* Maps are still ordered, go to next one. */
			i++;
			if (after) {
				/*
				 * 'before' and 'after' mean 'new' split the
				 * 'pos' mapping and therefore there are no
				 * later mappings.
				 */
				err = __maps__insert_sorted(maps, i, new, after);
				map__put(after);
				check_invariants(maps);
				return err;
			}
			check_invariants(maps);
		} else if (after) {
			/*
			 * 'after' means 'new' split 'pos' and there are no
			 * later mappings.
			 */
			map__put(maps_by_address[i]);
			maps_by_address[i] = map__get(new);
			map__set_kmap_maps(new, maps);

			if (maps_by_name) {
				map__put(maps_by_name[ni]);
				maps_by_name[ni] = map__get(new);
			}

			err = __maps__insert_sorted(maps, i + 1, after, NULL);
			map__put(after);
			check_invariants(maps);
			return err;
		} else {
			struct map *next = NULL;

			if (i + 1 < maps__nr_maps(maps))
				next = maps_by_address[i + 1];

			if (!next || map__start(next) >= map__end(new)) {
				/*
				 * Replace existing mapping and end knowing
				 * there aren't later overlapping or any
				 * mappings.
				 */
				map__put(maps_by_address[i]);
				maps_by_address[i] = map__get(new);
				map__set_kmap_maps(new, maps);

				if (maps_by_name) {
					map__put(maps_by_name[ni]);
					maps_by_name[ni] = map__get(new);
				}
				check_invariants(maps);
				return err;
			}
			__maps__remove(maps, pos);
			check_invariants(maps);
			/*
			 * Maps are ordered but no need to increase `i` as the
			 * later maps were moved down.
			 */
		}
	}
	/* Add the map. */
	err = __maps__insert_sorted(maps, i, new, NULL);
out_err:
	return err;
}
int maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
{ int err;
int maps__copy_from(struct maps *dest, struct maps *parent)
{ /* Note, if struct map were immutable then cloning could use ref counts. */ struct map **parent_maps_by_address; int err = 0; unsignedint n;
/* See locking/sorting note. */ while (!done) { unsignedint i;
down_read(maps__lock(maps));
/* First check last found entry. */
i = RC_CHK_ACCESS(maps)->last_search_by_name_idx; if (i < maps__nr_maps(maps) && maps__maps_by_name(maps)) { struct dso *dso = map__dso(maps__maps_by_name(maps)[i]);
if (dso && strcmp(dso__short_name(dso), name) == 0) {
result = map__get(maps__maps_by_name(maps)[i]);
done = true;
}
}
/* Second search sorted array. */ if (!done && maps__maps_by_name_sorted(maps)) { struct map **mapp =
bsearch(name, maps__maps_by_name(maps), maps__nr_maps(maps), sizeof(*mapp), map__strcmp_name);
if (mapp) {
result = map__get(*mapp);
i = mapp - maps__maps_by_name(maps);
RC_CHK_ACCESS(maps)->last_search_by_name_idx = i;
}
done = true;
}
up_read(maps__lock(maps)); if (!done) { /* Sort and retry binary search. */ if (maps__sort_by_name(maps)) { /* * Memory allocation failed do linear search * through address sorted maps.
*/ struct map **maps_by_address; unsignedint n;
down_read(maps__lock(maps));
maps_by_address = maps__maps_by_address(maps);
n = maps__nr_maps(maps); for (i = 0; i < n; i++) { struct map *pos = maps_by_address[i]; struct dso *dso = map__dso(pos);
down_read(maps__lock(maps)); while (!maps__maps_by_address_sorted(maps)) {
up_read(maps__lock(maps));
maps__sort_by_address(maps);
down_read(maps__lock(maps));
}
i = maps__by_address_index(maps, map); if (++i < maps__nr_maps(maps))
result = map__get(maps__maps_by_address(maps)[i]);
down_write(maps__lock(maps)); if (!maps__maps_by_address_sorted(maps))
__maps__sort_by_address(maps);
maps_by_address = maps__maps_by_address(maps);
n = maps__nr_maps(maps); for (unsignedint i = 1; i < n; i++) { struct map *prev = maps_by_address[i - 1]; struct map *curr = maps_by_address[i];
if (!map__end(prev) || map__end(prev) > map__start(curr))
map__set_end(prev, map__start(curr));
}
/* * We still haven't the actual symbols, so guess the * last map final address.
*/ if (n > 0 && !map__end(maps_by_address[n - 1]))
map__set_end(maps_by_address[n - 1], ~0ULL);
/* * Merges map into maps by splitting the new map within the existing map * regions.
*/ int maps__merge_in(struct maps *kmaps, struct map *new_map)
{ unsignedint first_after_, kmaps__nr_maps; struct map **kmaps_maps_by_address; struct map **merged_maps_by_address; unsignedint merged_nr_maps_allocated;
/* First try under a read lock. */ while (true) {
down_read(maps__lock(kmaps)); if (maps__maps_by_address_sorted(kmaps)) break;
up_read(maps__lock(kmaps));
/* First after binary search requires sorted maps. Sort and try again. */
maps__sort_by_address(kmaps);
}
first_after_ = first_ending_after(kmaps, new_map);
kmaps_maps_by_address = maps__maps_by_address(kmaps);
if (first_after_ >= maps__nr_maps(kmaps) ||
map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) { /* No overlap so regular insert suffices. */
up_read(maps__lock(kmaps)); return maps__insert(kmaps, new_map);
}
up_read(maps__lock(kmaps));
/* Plain insert with a read-lock failed, try again now with the write lock. */
down_write(maps__lock(kmaps)); if (!maps__maps_by_address_sorted(kmaps))
__maps__sort_by_address(kmaps);
if (first_after_ >= kmaps__nr_maps ||
map__start(kmaps_maps_by_address[first_after_]) >= map__end(new_map)) { /* No overlap so regular insert suffices. */ int ret = __maps__insert(kmaps, new_map);
check_invariants(kmaps);
up_write(maps__lock(kmaps)); return ret;
} /* Array to merge into, possibly 1 more for the sake of new_map. */
merged_nr_maps_allocated = RC_CHK_ACCESS(kmaps)->nr_maps_allocated; if (kmaps__nr_maps + 1 == merged_nr_maps_allocated)
merged_nr_maps_allocated++;
/* Copy entries before the new_map that can't overlap. */ for (unsignedint i = 0; i < first_after_; i++)
merged_maps_by_address[i] = map__get(kmaps_maps_by_address[i]);
maps__set_nr_maps(kmaps, first_after_);
/* Add the new map, it will be split when the later overlapping mappings are added. */
__maps__insert(kmaps, new_map);
/* Insert mappings after new_map, splitting new_map in the process. */ for (unsignedint i = first_after_; i < kmaps__nr_maps; i++)
__maps__fixup_overlap_and_insert(kmaps, kmaps_maps_by_address[i]);
/* Copy the maps from merged into kmaps. */ for (unsignedint i = 0; i < kmaps__nr_maps; i++)
map__zput(kmaps_maps_by_address[i]);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.