/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include"LulMain.h"
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> // write(), only for testing LUL
// Set this to 1 for verbose logging
#define DEBUG_MAIN 0
namespace lul {
using mozilla::CheckedInt;
using mozilla::DebugOnly;
using mozilla::MallocSizeOf;
using mozilla::Unused;
using std::pair;
using std::string;
using std::vector;
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
//
// Some functions in this file are marked RUNS IN NO-MALLOC CONTEXT.
// Any such function -- and, hence, the transitive closure of those
// reachable from it -- must not do any dynamic memory allocation.
// Doing so risks deadlock.  There is exactly one root function for
// the transitive closure: Lul::Unwind.
//
// WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
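// To illustrate the constraint (this helper is not part of LUL, just an
// example): code reachable from Lul::Unwind may only work on storage the
// caller already owns.
//
//   // RUNS IN NO-MALLOC CONTEXT
//   static size_t CountValid(const TaggedUWord* aArr, size_t aLen) {
//     size_t nValid = 0;
//     for (size_t i = 0; i < aLen; i++) {
//       if (aArr[i].Valid()) nValid++;
//     }
//     return nValid;  // fine: no allocation anywhere
//   }
//
// By contrast, std::string concatenation or std::vector::push_back may
// call malloc, and so must not appear anywhere in that transitive closure.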
static const char* NameOf_DW_REG(int16_t aReg) {
  switch (aReg) {
    case DW_REG_CFA:
      return "cfa";
#if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
    case DW_REG_INTEL_XBP:
      return "xbp";
    case DW_REG_INTEL_XSP:
      return "xsp";
    case DW_REG_INTEL_XIP:
      return "xip";
#elif defined(GP_ARCH_arm)
    case DW_REG_ARM_R7:
      return "r7";
    case DW_REG_ARM_R11:
      return "r11";
    case DW_REG_ARM_R12:
      return "r12";
    case DW_REG_ARM_R13:
      return "r13";
    case DW_REG_ARM_R14:
      return "r14";
    case DW_REG_ARM_R15:
      return "r15";
#elif defined(GP_ARCH_arm64)
    case DW_REG_AARCH64_X29:
      return "x29";
    case DW_REG_AARCH64_X30:
      return "x30";
    case DW_REG_AARCH64_SP:
      return "sp";
#elif defined(GP_ARCH_mips64)
    case DW_REG_MIPS_SP:
      return "sp";
    case DW_REG_MIPS_FP:
      return "fp";
    case DW_REG_MIPS_PC:
      return "pc";
#else
#  error "Unsupported arch"
#endif
    default:
      return "???";
  }
}
string LExpr::ShowRule(const char* aNewReg) const {
  char buf[64];
  string res = string(aNewReg) + "=";
  switch (mHow) {
    case UNKNOWN:
      res += "Unknown";
      break;
    case NODEREF:
      SprintfLiteral(buf, "%s+%d", NameOf_DW_REG(mReg), (int)mOffset);
      res += buf;
      break;
    case DEREF:
      SprintfLiteral(buf, "*(%s+%d)", NameOf_DW_REG(mReg), (int)mOffset);
      res += buf;
      break;
    case PFXEXPR:
      SprintfLiteral(buf, "PfxExpr-at-%d", (int)mOffset);
      res += buf;
      break;
    default:
      res += "???";
      break;
  }
  return res;
}
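// For example (illustrative field values): a NODEREF rule with mReg ==
// DW_REG_INTEL_XSP and mOffset == 8 renders via ShowRule("cfa") as
// "cfa=xsp+8"; the corresponding DEREF rule renders as "cfa=*(xsp+8)".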
void RuleSet::Print(uintptr_t avma, uintptr_t len,
                    void (*aLog)(const char*)) const {
  char buf[96];
  SprintfLiteral(buf, "[%llx .. %llx]: let ", (unsigned long long int)avma,
                 (unsigned long long int)(avma + len - 1));
  string res = string(buf);
  res += mCfaExpr.ShowRule("cfa");
  res += " in";
  // For each reg we care about, print the recovery expression.
#if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
  res += mXipExpr.ShowRule(" RA");
  res += mXspExpr.ShowRule(" SP");
  res += mXbpExpr.ShowRule(" BP");
#elif defined(GP_ARCH_arm)
  res += mR15expr.ShowRule(" R15");
  res += mR7expr.ShowRule(" R7");
  res += mR11expr.ShowRule(" R11");
  res += mR12expr.ShowRule(" R12");
  res += mR13expr.ShowRule(" R13");
  res += mR14expr.ShowRule(" R14");
#elif defined(GP_ARCH_arm64)
  res += mX29expr.ShowRule(" X29");
  res += mX30expr.ShowRule(" X30");
  res += mSPexpr.ShowRule(" SP");
#elif defined(GP_ARCH_mips64)
  res += mPCexpr.ShowRule(" PC");
  res += mSPexpr.ShowRule(" SP");
  res += mFPexpr.ShowRule(" FP");
#else
#  error "Unsupported arch"
#endif
  aLog(res.c_str());
}
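// On x86_64, a printed RuleSet might look like this (addresses and rules
// are illustrative only):
//
//   [40dc5f .. 40dc8c]: let cfa=xsp+8 in RA=*(cfa+-8) SP=cfa+0 BP=xbp+0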
LExpr* RuleSet::ExprForRegno(DW_REG_NUMBER aRegno) {
  switch (aRegno) {
    case DW_REG_CFA:
      return &mCfaExpr;
#if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
    case DW_REG_INTEL_XIP:
      return &mXipExpr;
    case DW_REG_INTEL_XSP:
      return &mXspExpr;
    case DW_REG_INTEL_XBP:
      return &mXbpExpr;
#elif defined(GP_ARCH_arm)
    case DW_REG_ARM_R15:
      return &mR15expr;
    case DW_REG_ARM_R14:
      return &mR14expr;
    case DW_REG_ARM_R13:
      return &mR13expr;
    case DW_REG_ARM_R12:
      return &mR12expr;
    case DW_REG_ARM_R11:
      return &mR11expr;
    case DW_REG_ARM_R7:
      return &mR7expr;
#elif defined(GP_ARCH_arm64)
    case DW_REG_AARCH64_X29:
      return &mX29expr;
    case DW_REG_AARCH64_X30:
      return &mX30expr;
    case DW_REG_AARCH64_SP:
      return &mSPexpr;
#elif defined(GP_ARCH_mips64)
    case DW_REG_MIPS_SP:
      return &mSPexpr;
    case DW_REG_MIPS_FP:
      return &mFPexpr;
    case DW_REG_MIPS_PC:
      return &mPCexpr;
#else
#  error "Unknown arch"
#endif
    default:
      return nullptr;
  }
}
RuleSet::RuleSet() {
  // All fields are of type LExpr and so are initialised by LExpr::LExpr().
}
// RUNS IN NO-MALLOC CONTEXT
RuleSet* SecMap::FindRuleSet(uintptr_t ia) {
  // Binary search mExtents to find one that brackets |ia|.
  // lo and hi need to be signed, else the loop termination tests
  // don't work properly.  Note that this works correctly even when
  // mExtents.size() == 0.

  // Can't do this until the array has been sorted and preened.
  MOZ_ASSERT(mUsable);

  long int lo = 0;
  long int hi = (long int)mExtents.size() - 1;
  while (true) {
    // current unsearched space is from lo to hi, inclusive.
    if (lo > hi) {
      // not found
      return nullptr;
    }
    long int mid = lo + ((hi - lo) / 2);
    Extent* mid_extent = &mExtents[mid];
    uintptr_t mid_offset = mid_extent->offset();
    uintptr_t mid_len = mid_extent->len();
    uintptr_t mid_minAddr = mMapMinAVMA + mid_offset;
    uintptr_t mid_maxAddr = mid_minAddr + mid_len - 1;
    if (ia < mid_minAddr) {
      hi = mid - 1;
      continue;
    }
    if (ia > mid_maxAddr) {
      lo = mid + 1;
      continue;
    }
    MOZ_ASSERT(mid_minAddr <= ia && ia <= mid_maxAddr);
    uint32_t mid_extent_dictIx = mid_extent->dictIx();
    MOZ_RELEASE_ASSERT(mid_extent_dictIx < mExtents.size());
    return &mDictionary[mid_extent_dictIx];
  }
  // NOTREACHED
}
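// To illustrate the two-level scheme (illustrative addresses): if the same
// RuleSet describes both [0x1000, 0x1fff] and [0x5000, 0x5fff], the two
// Extents carry the same dictIx and the RuleSet is stored just once in
// mDictionary.  A lookup is then a binary search of mExtents followed by a
// single index into mDictionary; no per-address RuleSet copies exist.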
// Add a RuleSet to the collection.  The rule is copied in.  Calling
// this makes the map non-searchable.
void SecMap::AddRuleSet(const RuleSet* rs, uintptr_t avma, uintptr_t len) {
  mUsable = false;

  // Zero length RuleSet?  Meaningless, but ignore it anyway.
  if (len == 0) {
    return;
  }

  // Ignore attempts to add RuleSets whose address range doesn't fall within
  // the declared address range for the SecMap.  Maybe we should print some
  // kind of error message rather than silently ignoring them.
  if (!(avma >= mMapMinAVMA && avma + len - 1 <= mMapMaxAVMA)) {
    return;
  }

  // Because `mMapMinAVMA` .. `mMapMaxAVMA` can specify at most a 2^32-1 byte
  // chunk of address space, the following must now hold.
  MOZ_RELEASE_ASSERT(len <= (uintptr_t)0xFFFFFFFF);

  // See if `mUniqifier` already has `rs`.  If so set `dictIx` to the assigned
  // dictionary index; if not, add `rs` to `mUniqifier` and assign a new
  // dictionary index.  This is the core of the RuleSet-de-duplication process.
  uint32_t dictIx = 0;
  mozilla::HashMap<RuleSet, uint32_t, RuleSet, InfallibleAllocPolicy>::AddPtr
      p = mUniqifier->lookupForAdd(*rs);
  if (!p) {
    dictIx = mUniqifier->count();
    // If this ever fails, Extents::dictIx will need to be changed to be a
    // type wider than the current uint16_t.
    MOZ_RELEASE_ASSERT(dictIx < (1 << 16));
    // This returns `false` on OOM.  We ignore the return value since we asked
    // for it to use the InfallibleAllocPolicy.
    DebugOnly<bool> addedOK = mUniqifier->add(p, *rs, dictIx);
    MOZ_ASSERT(addedOK);
  } else {
    dictIx = p->value();
  }

  uint32_t offset = (uint32_t)(avma - mMapMinAVMA);
  while (len > 0) {
    // Because Extents::len is a uint16_t, we have to add multiple `mExtents`
    // entries to cover the case where `len` is equal to or greater than 2^16.
    // This happens only exceedingly rarely.  In order to get more test
    // coverage on what would otherwise be a very low probability (less than
    // 0.0002%) corner case, we do this in steps of 4095.  On libxul.so as of
    // Sept 2020, this increases the number of `mExtents` entries by about
    // 0.05%, hence has no meaningful effect on space use, but increases the
    // use of this corner case, and hence its test coverage, by a factor of
    // 250.
    uint32_t this_step_len = (len > 4095) ? 4095 : len;
    mExtents.emplace_back(offset, this_step_len, dictIx);
    offset += this_step_len;
    len -= this_step_len;
  }
}
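// Worked example of the chunking (hypothetical values): AddRuleSet(rs, avma,
// 10000) pushes three Extents carrying the same dictIx, of lengths 4095,
// 4095 and 1810 (4095 + 4095 + 1810 == 10000), at offsets o, o+4095 and
// o+8190, where o == avma - mMapMinAVMA.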
// Add a PfxInstr to the vector of such instrs, and return the index
// in the vector.  Calling this makes the map non-searchable.
uint32_t SecMap::AddPfxInstr(PfxInstr pfxi) {
  mUsable = false;
  mPfxInstrs.push_back(pfxi);
  return mPfxInstrs.size() - 1;
}
// Prepare the map for searching, by sorting it, de-overlapping entries and
// removing any resulting zero-length entries.  At the start of this routine,
// all Extents should fall within [mMapMinAVMA, mMapMaxAVMA] and not have zero
// length, as a result of the checks in AddRuleSet().
void SecMap::PrepareRuleSets() {
  // At this point, the de-duped RuleSets are in `mUniqifier`, and
  // `mDictionary` is empty.  This method will, amongst other things, copy
  // them into `mDictionary` in order of their assigned dictionary-index
  // values, as established by `SecMap::AddRuleSet`, and free `mUniqifier`;
  // after this method, it has no further use.
  MOZ_RELEASE_ASSERT(mUniqifier);
  MOZ_RELEASE_ASSERT(mDictionary.empty());

  if (mExtents.empty()) {
    mUniqifier->clear();
    mUniqifier = nullptr;
    return;
  }

  if (mMapMinAVMA == 1 && mMapMaxAVMA == 0) {
    // The map is empty.  This should never happen.
    mExtents.clear();
    mUniqifier->clear();
    mUniqifier = nullptr;
    return;
  }
  MOZ_RELEASE_ASSERT(mMapMinAVMA <= mMapMaxAVMA);

  // We must have at least one Extent, and as a consequence there must be at
  // least one entry in the uniqifier.
  MOZ_RELEASE_ASSERT(!mExtents.empty() && !mUniqifier->empty());
#ifdef DEBUG
  // Check invariants on incoming Extents.
  for (size_t i = 0; i < mExtents.size(); ++i) {
    Extent* ext = &mExtents[i];
    uint32_t len = ext->len();
    MOZ_ASSERT(len > 0);
    MOZ_ASSERT(len <= 4095 /* per '4095' in AddRuleSet() */);
    uint32_t offset = ext->offset();
    uintptr_t avma = mMapMinAVMA + (uintptr_t)offset;
    // Upper bounds test.  There's no lower bounds test because `offset` is a
    // positive displacement from `mMapMinAVMA`, so a small underrun will
    // manifest as `len` being close to 2^32.
    MOZ_ASSERT(avma + (uintptr_t)len - 1 <= mMapMaxAVMA);
  }
#endif
  // Sort the Extents by ascending offset.  The de-overlapping pass below,
  // and the searchability of the finished map, both rely on this ordering.
  std::sort(mExtents.begin(), mExtents.end(),
            [](const Extent& ext1, const Extent& ext2) {
              return ext1.offset() < ext2.offset();
            });

  // Iteratively truncate any overlaps and remove any zero length
  // entries that might result, or that may have been present
  // initially.  Unless the input is seriously screwy, this is
  // expected to iterate only once.
  while (true) {
    size_t i;
    size_t n = mExtents.size();
    size_t nZeroLen = 0;

    if (n == 0) {
      break;
    }

    for (i = 1; i < n; ++i) {
      Extent* prev = &mExtents[i - 1];
      Extent* here = &mExtents[i];
      MOZ_ASSERT(prev->offset() <= here->offset());
      if (prev->offset() + prev->len() > here->offset()) {
        prev->setLen(here->offset() - prev->offset());
      }
      if (prev->len() == 0) {
        nZeroLen++;
      }
    }

    if (mExtents[n - 1].len() == 0) {
      nZeroLen++;
    }

    // At this point, the entries are in-order and non-overlapping.
    // If none of them are zero-length, we are done.
    if (nZeroLen == 0) {
      break;
    }

    // Slide back the entries to remove the zero length ones.
    size_t j = 0;  // The write-point.
    for (i = 0; i < n; ++i) {
      if (mExtents[i].len() == 0) {
        continue;
      }
      if (j != i) {
        mExtents[j] = mExtents[i];
      }
      ++j;
    }
    MOZ_ASSERT(i == n);
    MOZ_ASSERT(nZeroLen <= n);
    MOZ_ASSERT(j == n - nZeroLen);
    while (nZeroLen > 0) {
      mExtents.pop_back();
      nZeroLen--;
    }
    MOZ_ASSERT(mExtents.size() == j);
  }
  size_t nExtents = mExtents.size();

#ifdef DEBUG
  // Do a final check on the extents: their address ranges must be
  // ascending, non overlapping, non zero sized.
  if (nExtents > 0) {
    MOZ_ASSERT(mExtents[0].len() > 0);
    for (size_t i = 1; i < nExtents; ++i) {
      const Extent* prev = &mExtents[i - 1];
      const Extent* here = &mExtents[i];
      MOZ_ASSERT(prev->offset() < here->offset());
      MOZ_ASSERT(here->len() > 0);
      MOZ_ASSERT(prev->offset() + prev->len() <= here->offset());
    }
  }
#endif
  // Create the final dictionary by enumerating the uniqifier.
  size_t nUniques = mUniqifier->count();
  RuleSet dummy;
  mozilla::PodZero(&dummy);
  mDictionary.reserve(nUniques);
  for (size_t i = 0; i < nUniques; i++) {
    mDictionary.push_back(dummy);
  }
for (auto iter = mUniqifier->iter(); !iter.done(); iter.next()) {
MOZ_RELEASE_ASSERT(iter.get().value() < nUniques);
mDictionary[iter.get().value()] = iter.get().key();
  }

  // `mUniqifier` has served its purpose; free it.
  mUniqifier->clear();
  mUniqifier = nullptr;

  // The map is now sorted, preened, and searchable.
  mUsable = true;
}

size_t SecMap::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
size_t n = aMallocSizeOf(this);
  // It's conceivable that these calls would be unsafe with some
  // implementations of std::vector, but it seems to be working for now...
n += aMallocSizeOf(mPfxInstrs.data());
if (mUniqifier) {
n += mUniqifier->shallowSizeOfIncludingThis(aMallocSizeOf);
}
n += aMallocSizeOf(mDictionary.data());
n += aMallocSizeOf(mExtents.data());
  return n;
}

// A SegArray holds a set of address ranges that together exactly
// cover an address range, with no overlaps or holes.  Each range has
// an associated value, which in this case has been specialised to be
// a simple boolean.  The representation is kept to minimal canonical
// form in which adjacent ranges with the same associated value are
// merged together.  Each range is represented by a |struct Seg|.
//
// SegArrays are used to keep track of which parts of the address
// space are known to contain instructions.
class SegArray {
 public:
  struct Seg {
    Seg(uintptr_t lo, uintptr_t hi, bool val) : lo(lo), hi(hi), val(val) {}
    uintptr_t lo;
    uintptr_t hi;
    bool val;
  };

  void add(uintptr_t lo, uintptr_t hi, bool val) {
    if (lo > hi) {
      return;
}
    split_at(lo);
    if (hi < UINTPTR_MAX) {
      split_at(hi + 1);
    }
    std::vector<Seg>::size_type iLo, iHi, i;
    iLo = find(lo);
    iHi = find(hi);
    for (i = iLo; i <= iHi; ++i) {
mSegs[i].val = val;
}
preen();
}
  void preen() {
    for (std::vector<Seg>::iterator iter = mSegs.begin();
         iter < mSegs.end() - 1; ++iter) {
      if (iter[0].val != iter[1].val) {
        continue;
      }
      iter[0].hi = iter[1].hi;
      mSegs.erase(iter + 1);
      // Back up one, so as not to miss an opportunity to merge
      // with the entry after this one.
      --iter;
    }
  }
// RUNS IN NO-MALLOC CONTEXT
  std::vector<Seg>::size_type find(uintptr_t a) {
    long int lo = 0;
    long int hi = (long int)mSegs.size();
    while (true) {
      // The unsearched space is lo .. hi inclusive.
      if (lo > hi) {
        // Not found.  This can't happen.
        return (std::vector<Seg>::size_type)(-1);
      }
      long int mid = lo + ((hi - lo) / 2);
      uintptr_t mid_lo = mSegs[mid].lo;
      uintptr_t mid_hi = mSegs[mid].hi;
      if (a < mid_lo) {
        hi = mid - 1;
        continue;
      }
      if (a > mid_hi) {
        lo = mid + 1;
        continue;
      }
      return (std::vector<Seg>::size_type)mid;
    }
  }
  void split_at(uintptr_t a) {
    std::vector<Seg>::size_type i = find(a);
    if (mSegs[i].lo == a) {
      return;
    }
    mSegs.insert(mSegs.begin() + i + 1, mSegs[i]);
    mSegs[i].hi = a - 1;
    mSegs[i + 1].lo = a;
  }

  // RUNS IN NO-MALLOC CONTEXT
  // Return, via |rx_min| and |rx_max|, the bounds of the code segment
  // containing |addr|, if any.  (Reconstructed here from its call site in
  // LUL::Unwind below.)
  bool getBoundingCodeSegment(/*OUT*/ uintptr_t* rx_min,
                              /*OUT*/ uintptr_t* rx_max, uintptr_t addr) {
    std::vector<Seg>::size_type i = find(addr);
    if (!mSegs[i].val) {
      // |addr| isn't in a code segment.
      return false;
    }
    *rx_min = mSegs[i].lo;
    *rx_max = mSegs[i].hi;
    return true;
  }

  SegArray() {
    // Start with a single range covering the entire address space,
    // marked as containing no code.
    mSegs.push_back(Seg(0, UINTPTR_MAX, false));
  }

 private:
  // The segments, in increasing order of their |lo| fields.
  std::vector<Seg> mSegs;
};
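// Example of SegArray behaviour (illustrative addresses).  Starting from
// the initial single range [0, UINTPTR_MAX] -> false, the calls
//
//   segArray.add(0x1000, 0x1fff, true);
//   segArray.add(0x2000, 0x2fff, true);
//
// leave three Segs, because preen() merges the two adjacent 'true' ranges:
//
//   [0, 0xfff]              -> false
//   [0x1000, 0x2fff]        -> true
//   [0x3000, UINTPTR_MAX]   -> false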
class PriMap {
 public:
  explicit PriMap(void (*aLog)(const char*)) : mLog(aLog) {}
// RUNS IN NO-MALLOC CONTEXT
pair<const RuleSet*, const vector<PfxInstr>*> Lookup(uintptr_t ia) {
    SecMap* sm = FindSecMap(ia);
    return pair<const RuleSet*, const vector<PfxInstr>*>(
sm ? sm->FindRuleSet(ia) : nullptr, sm ? sm->GetPfxInstrs() : nullptr);
}
  // Add a secondary map.  No overlaps allowed w.r.t. existing
  // secondary maps.
  void AddSecMap(mozilla::UniquePtr<SecMap>&& aSecMap) {
    // We can't add an empty SecMap to the PriMap.  But that's OK
    // since we'd never be able to find anything in it anyway.
    if (aSecMap->IsEmpty()) {
      return;
    }

    // Iterate through the SecMaps and find the right place for this
    // one.  At the same time, ensure that the in-order
    // non-overlapping invariant is preserved (and, generally, holds).
    // FIXME: this gives a cost that is O(N^2) in the total number of
    // shared objects in the system.  ToDo: better.
    MOZ_ASSERT(aSecMap->mMapMinAVMA <= aSecMap->mMapMaxAVMA);
    size_t num_secMaps = mSecMaps.size();
    uintptr_t i;
    for (i = 0; i < num_secMaps; ++i) {
      mozilla::UniquePtr<SecMap>& sm_i = mSecMaps[i];
      MOZ_ASSERT(sm_i->mMapMinAVMA <= sm_i->mMapMaxAVMA);
      if (aSecMap->mMapMinAVMA < sm_i->mMapMaxAVMA) {
        // |aSecMap| needs to be inserted immediately before mSecMaps[i].
        break;
      }
    }
    MOZ_ASSERT(i <= num_secMaps);
    if (i == num_secMaps) {
      // It goes at the end.
      mSecMaps.push_back(std::move(aSecMap));
    } else {
      std::vector<mozilla::UniquePtr<SecMap>>::iterator iter =
          mSecMaps.begin() + i;
      mSecMaps.insert(iter, std::move(aSecMap));
    }
    char buf[100];
    SprintfLiteral(buf, "AddSecMap: now have %d SecMaps\n",
                   (int)mSecMaps.size());
    buf[sizeof(buf) - 1] = 0;
    mLog(buf);
  }
  // Remove and delete any SecMaps in the mapping, that intersect
  // with the specified address range.
  void RemoveSecMapsInRange(uintptr_t avma_min, uintptr_t avma_max) {
    MOZ_ASSERT(avma_min <= avma_max);
    size_t num_secMaps = mSecMaps.size();
    if (num_secMaps > 0) {
      intptr_t i;
      // Iterate from end to start over the vector, so as to ensure
      // that the special case where |avma_min| and |avma_max| denote
      // the entire address space, can be completed in time proportional
      // to the number of elements in the map.
      for (i = (intptr_t)num_secMaps - 1; i >= 0; i--) {
        mozilla::UniquePtr<SecMap>& sm_i = mSecMaps[i];
        if (sm_i->mMapMaxAVMA < avma_min || avma_max < sm_i->mMapMinAVMA) {
          // There's no overlap.  Move on.
          continue;
        }
        // We need to remove mSecMaps[i] and slide all those above it
        // downwards to cover the hole.
        mSecMaps.erase(mSecMaps.begin() + i);
      }
    }
  }
// Return the number of currently contained SecMaps.
size_t CountSecMaps() { return mSecMaps.size(); }
size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
size_t n = aMallocSizeOf(this);
    // It's conceivable that this call would be unsafe with some
    // implementations of std::vector, but it seems to be working for now...
n += aMallocSizeOf(mSecMaps.data());
for (size_t i = 0; i < mSecMaps.size(); i++) {
n += mSecMaps[i]->SizeOfIncludingThis(aMallocSizeOf);
}
return n;
}
 private:
  // RUNS IN NO-MALLOC CONTEXT
  SecMap* FindSecMap(uintptr_t ia) {
    // Binary search mSecMaps to find one that brackets |ia|.
    // lo and hi need to be signed, else the loop termination tests
    // don't work properly.
    long int lo = 0;
    long int hi = (long int)mSecMaps.size() - 1;
    while (true) {
      // current unsearched space is from lo to hi, inclusive.
      if (lo > hi) {
        // not found
        return nullptr;
      }
      long int mid = lo + ((hi - lo) / 2);
      mozilla::UniquePtr<SecMap>& mid_secMap = mSecMaps[mid];
      uintptr_t mid_minAddr = mid_secMap->mMapMinAVMA;
      uintptr_t mid_maxAddr = mid_secMap->mMapMaxAVMA;
      if (ia < mid_minAddr) {
        hi = mid - 1;
        continue;
      }
      if (ia > mid_maxAddr) {
        lo = mid + 1;
        continue;
      }
      MOZ_ASSERT(mid_minAddr <= ia && ia <= mid_maxAddr);
      return mid_secMap.get();
    }
    // NOTREACHED
  }
  // sorted array of per-object ranges, non overlapping, non empty
  std::vector<mozilla::UniquePtr<SecMap>> mSecMaps;

  // a logging sink, for debugging.
  void (*mLog)(const char*);
};
void LUL::MaybeShowStats() {
  // This is racey in the sense that it can't guarantee that
  //    n_new == n_new_Context + n_new_CFI + n_new_Scanned
  // if it should happen that mStats is updated by some other thread
  // in between computation of n_new and n_new_{Context,CFI,FP}.
  // But it's just stats printing, so we don't really care.
  uint32_t n_new = mStats - mStatsPrevious;
  if (n_new >= 5000) {
    uint32_t n_new_Context = mStats.mContext - mStatsPrevious.mContext;
    uint32_t n_new_CFI = mStats.mCFI - mStatsPrevious.mCFI;
    uint32_t n_new_FP = mStats.mFP - mStatsPrevious.mFP;
    mStatsPrevious = mStats;
    char buf[200];
    SprintfLiteral(buf,
                   "LUL frame stats: TOTAL %5u"
                   "    CTX %4u    CFI %4u    FP %4u",
                   n_new, n_new_Context, n_new_CFI, n_new_FP);
    buf[sizeof(buf) - 1] = 0;
    mLog(buf);
  }
}
size_t LUL::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
size_t n = aMallocSizeOf(this);
n += mPriMap->SizeOfIncludingThis(aMallocSizeOf);
  // Measurement of the following members may be added later if DMD finds it
  // is worthwhile:
  // - mSegArray
  // - mUSU
return n;
}
void LUL::EnableUnwinding() {
  LUL_LOG("LUL::EnableUnwinding");
  // Don't assert for Admin mode here.  That is, tolerate a call here
  // if we are already in Unwinding mode.
  MOZ_RELEASE_ASSERT(profiler_current_thread_id() == mAdminThreadId);

  mAdminMode = false;
}

void LUL::NotifyAfterMap(uintptr_t aRXavma, size_t aSize,
                         const char* aFileName, const void* aMappedImage) {
  MOZ_RELEASE_ASSERT(mAdminMode);
  MOZ_RELEASE_ASSERT(profiler_current_thread_id() == mAdminThreadId);

  // We can't have a SecMap covering more than 2^32-1 bytes of address space.
  // See the definition of SecMap for why.  Rather than crash the system, just
  // limit the SecMap's size accordingly.  This case is never actually
  // expected to happen.
  if (((unsigned long long int)aSize) > 0xFFFFFFFFULL) {
    aSize = (uintptr_t)0xFFFFFFFF;
  }
  MOZ_RELEASE_ASSERT(aSize <= 0xFFFFFFFF);
  // Ignore obviously-stupid notifications.
  if (aSize > 0) {
    // Here's a new mapping, for this object.
    mozilla::UniquePtr<SecMap> smap =
        mozilla::MakeUnique<SecMap>(aRXavma, (uint32_t)aSize, mLog);

    // Read CFI or EXIDX unwind data into |smap|.
    if (!aMappedImage) {
      (void)lul::ReadSymbolData(string(aFileName), std::vector<string>(),
                                smap.get(), (void*)aRXavma, aSize, mUSU, mLog);
    } else {
      (void)lul::ReadSymbolDataInternal(
          (const uint8_t*)aMappedImage, string(aFileName),
          std::vector<string>(), smap.get(), (void*)aRXavma, aSize, mUSU,
          mLog);
    }

    // Add it to the primary map (the top level set of mapped objects).
    mPriMap->AddSecMap(std::move(smap));

    // Tell the segment array about the mapping, so that the stack
    // scan and __kernel_syscall mechanisms know where valid code is.
    mSegArray->add(aRXavma, aRXavma + aSize - 1, true);
  }
}
void LUL::NotifyExecutableArea(uintptr_t aRXavma, size_t aSize) {
  MOZ_RELEASE_ASSERT(mAdminMode);
  MOZ_RELEASE_ASSERT(profiler_current_thread_id() == mAdminThreadId);

  // Ignore obviously-stupid notifications.
  if (aSize > 0) {
    // Tell the segment array about the mapping, so that the stack
    // scan and __kernel_syscall mechanisms know where valid code is.
    mSegArray->add(aRXavma, aRXavma + aSize - 1, true);
  }
}
void LUL::NotifyBeforeUnmap(uintptr_t aRXavmaMin, uintptr_t aRXavmaMax) {
  MOZ_RELEASE_ASSERT(mAdminMode);
  MOZ_RELEASE_ASSERT(profiler_current_thread_id() == mAdminThreadId);

  MOZ_ASSERT(aRXavmaMin <= aRXavmaMax);

  // Remove from the primary map, any secondary maps that intersect
  // with the address range.  Also delete the secondary maps.
  mPriMap->RemoveSecMapsInRange(aRXavmaMin, aRXavmaMax);

  // Tell the segment array that the address range no longer
  // contains valid code.
  mSegArray->add(aRXavmaMin, aRXavmaMax, false);

  char buf[100];
  SprintfLiteral(buf, "NotifyUnmap: now have %d SecMaps\n",
                 (int)mPriMap->CountSecMaps());
  buf[sizeof(buf) - 1] = 0;
  mLog(buf);
}
// RUNS IN NO-MALLOC CONTEXT
static TaggedUWord DerefTUW(TaggedUWord aAddr, const StackImage* aStackImg) {
  if (!aAddr.Valid()) {
    return TaggedUWord();
  }

  // Lower limit check.  |aAddr.Value()| is the lowest requested address
  // and |aStackImg->mStartAvma| is the lowest address we actually have,
  // so the comparison is straightforward.
  if (aAddr.Value() < aStackImg->mStartAvma) {
    return TaggedUWord();
  }

  // Upper limit check.  We must compute the highest requested address
  // and the highest address we actually have, but being careful to
  // avoid overflow.  In particular if |aAddr| is 0xFFF...FFF or the
  // 3/7 values below that, then we will get overflow.  See bug #1245477.
  typedef CheckedInt<uintptr_t> CheckedUWord;
  CheckedUWord highest_requested_plus_one =
      CheckedUWord(aAddr.Value()) + CheckedUWord(sizeof(uintptr_t));
  CheckedUWord highest_available_plus_one =
      CheckedUWord(aStackImg->mStartAvma) + CheckedUWord(aStackImg->mLen);
  if (!highest_requested_plus_one.isValid()     // overflow?
      || !highest_available_plus_one.isValid()  // overflow?
      || (highest_requested_plus_one.value() >
          highest_available_plus_one.value())) {  // in range?
    return TaggedUWord();
  }

  // The request is entirely within the image, so read and return the value.
  return TaggedUWord(*(uintptr_t*)(&aStackImg->mContents[0] + aAddr.Value() -
                                   aStackImg->mStartAvma));
}
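// Worked example of the overflow hazard on a 64-bit target (illustrative
// value): for aAddr.Value() == 0xFFFFFFFFFFFFFFF9, a naive computation of
// "highest requested + 1" wraps around to 1, so an unchecked bounds test
// would wrongly pass.  With CheckedInt, highest_requested_plus_one is
// flagged invalid and the function returns an invalid TaggedUWord instead.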
// RUNS IN NO-MALLOC CONTEXT
static TaggedUWord EvaluateReg(int16_t aReg, const UnwindRegs* aOldRegs,
                               TaggedUWord aCFA) {
  switch (aReg) {
    case DW_REG_CFA:
      return aCFA;
#if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
    case DW_REG_INTEL_XBP:
      return aOldRegs->xbp;
    case DW_REG_INTEL_XSP:
      return aOldRegs->xsp;
    case DW_REG_INTEL_XIP:
      return aOldRegs->xip;
#elif defined(GP_ARCH_arm)
    case DW_REG_ARM_R7:
      return aOldRegs->r7;
    case DW_REG_ARM_R11:
      return aOldRegs->r11;
    case DW_REG_ARM_R12:
      return aOldRegs->r12;
    case DW_REG_ARM_R13:
      return aOldRegs->r13;
    case DW_REG_ARM_R14:
      return aOldRegs->r14;
    case DW_REG_ARM_R15:
      return aOldRegs->r15;
#elif defined(GP_ARCH_arm64)
    case DW_REG_AARCH64_X29:
      return aOldRegs->x29;
    case DW_REG_AARCH64_X30:
      return aOldRegs->x30;
    case DW_REG_AARCH64_SP:
      return aOldRegs->sp;
#elif defined(GP_ARCH_mips64)
    case DW_REG_MIPS_SP:
      return aOldRegs->sp;
    case DW_REG_MIPS_FP:
      return aOldRegs->fp;
    case DW_REG_MIPS_PC:
      return aOldRegs->pc;
#else
#  error "Unsupported arch"
#endif
    default:
      MOZ_ASSERT(0);
      return TaggedUWord();
  }
}
// RUNS IN NO-MALLOC CONTEXT
// See prototype for comment.
TaggedUWord EvaluatePfxExpr(int32_t start, const UnwindRegs* aOldRegs,
                            TaggedUWord aCFA, const StackImage* aStackImg,
                            const vector<PfxInstr>& aPfxInstrs) {
  // A small evaluation stack, and a stack pointer, which points to
  // the highest numbered in-use element.
  const int N_STACK = 10;
  TaggedUWord stack[N_STACK];
  int stackPointer = -1;
  for (int i = 0; i < N_STACK; i++) stack[i] = TaggedUWord();

#define PUSH(_tuw)                                             \
  do {                                                         \
    if (stackPointer >= N_STACK - 1) goto fail; /* overflow */ \
    stack[++stackPointer] = (_tuw);                            \
  } while (0)

#define POP(_lval)                                   \
  do {                                               \
    if (stackPointer < 0) goto fail; /* underflow */ \
    _lval = stack[stackPointer--];                   \
  } while (0)

  // Cursor in the instruction sequence.
  size_t curr = start + 1;

  // Check the start point is sane.
  size_t nInstrs = aPfxInstrs.size();
  if (start < 0 || (size_t)start >= nInstrs) goto fail;

  {
    // The instruction sequence must start with PX_Start.  If not,
    // something is seriously wrong.
    PfxInstr first = aPfxInstrs[start];
    if (first.mOpcode != PX_Start) goto fail;

    // Push the CFA on the stack to start with (or not), as required by
    // the original DW_OP_*expression* CFI.
    if (first.mOperand != 0) PUSH(aCFA);
  }
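  // For orientation, a sketch of a possible instruction sequence (the
  // opcode names other than PX_Start are assumptions here; see LulMainInt.h
  // for the real set).  The DWARF expression "CFA + 16" might appear in
  // aPfxInstrs as:
  //
  //   PX_Start (operand 1)   -- push the CFA
  //   PX_SImm32 (operand 16) -- push the constant 16
  //   PX_Add                 -- pop two values, push their sum
  //   PX_End                 -- terminate; the result is the top of stack
  //
  // |start| indexes the PX_Start, and evaluation would yield CFA + 16.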
  while (true) {
    if (curr >= nInstrs) goto fail;  // ran off the end of the sequence

// RUNS IN NO-MALLOC CONTEXT
static void UseRuleSet(/*MOD*/ UnwindRegs* aRegs, const StackImage* aStackImg,
                       const RuleSet* aRS, const vector<PfxInstr>* aPfxInstrs) {
  // Take a copy of regs, since we'll need to refer to the old values
  // whilst computing the new ones.
  UnwindRegs old_regs = *aRegs;
  // Mark all the current register values as invalid, so that the
  // caller can see, on our return, which ones have been computed
  // anew.  If we don't even manage to compute a new PC value, then
  // the caller will have to abandon the unwind.
  // FIXME: Create and use instead:  aRegs->SetAllInvalid();
#if defined(GP_ARCH_amd64) || defined(GP_ARCH_x86)
  aRegs->xbp = TaggedUWord();
  aRegs->xsp = TaggedUWord();
  aRegs->xip = TaggedUWord();
#elif defined(GP_ARCH_arm)
  aRegs->r7 = TaggedUWord();
  aRegs->r11 = TaggedUWord();
  aRegs->r12 = TaggedUWord();
  aRegs->r13 = TaggedUWord();
  aRegs->r14 = TaggedUWord();
  aRegs->r15 = TaggedUWord();
#elif defined(GP_ARCH_arm64)
  aRegs->x29 = TaggedUWord();
  aRegs->x30 = TaggedUWord();
  aRegs->sp = TaggedUWord();
  aRegs->pc = TaggedUWord();
#elif defined(GP_ARCH_mips64)
  aRegs->sp = TaggedUWord();
  aRegs->fp = TaggedUWord();
  aRegs->pc = TaggedUWord();
#else
#  error "Unsupported arch"
#endif
  // This is generally useful.
  const TaggedUWord inval = TaggedUWord();
// First, compute the CFA.
TaggedUWord cfa = aRS->mCfaExpr.EvaluateExpr(&old_regs, inval /*old cfa*/,
aStackImg, aPfxInstrs);
  // If we didn't manage to compute the CFA, well .. that's ungood,
  // but keep going anyway.  It'll be OK provided none of the register
  // value rules mention the CFA.  In any case, compute the new values
  // for each register that we're tracking.

    // If we don't have a valid value for the PC, give up.
    if (!ia.Valid()) {
      break;
    }
    // If this is the innermost frame, record the SP value, which
    // presumably is valid.  If this isn't the innermost frame, and we
    // have a valid SP value, check that its SP value isn't less than
    // the one we've seen so far, so as to catch potential SP value
    // cycles.
    if (*aFramesUsed == 0) {
      last_valid_sp = sp;
    } else {
      MOZ_ASSERT(last_valid_sp.Valid());
      if (sp.Valid()) {
        if (sp.Value() < last_valid_sp.Value()) {
          // Hmm, SP going in the wrong direction.  Let's stop.
          break;
        }
        // Remember where we got to.
        last_valid_sp = sp;
      }
    }
    if (DEBUG_MAIN) {
      char buf[100];
      SprintfLiteral(buf, "ruleset for 0x%llx = %p\n",
                     (unsigned long long int)ia.Value(), ruleset);
      buf[sizeof(buf) - 1] = 0;
      mLog(buf);
    }
#if defined(GP_PLAT_x86_android) || defined(GP_PLAT_x86_linux)
    /////////////////////////////////////////////
    ////
    // On 32 bit x86-linux, syscalls are often done via the VDSO
    // function __kernel_vsyscall, which doesn't have a corresponding
    // object that we can read debuginfo from.  That effectively kills
    // off all stack traces for threads blocked in syscalls.  Hence
    // special-case by looking at the code surrounding the program
    // counter.
    //
    //   0xf7757420 <__kernel_vsyscall+0>:   push  %ecx
    //   0xf7757421 <__kernel_vsyscall+1>:   push  %edx
    //   0xf7757422 <__kernel_vsyscall+2>:   push  %ebp
    //   0xf7757423 <__kernel_vsyscall+3>:   mov   %esp,%ebp
    //   0xf7757425 <__kernel_vsyscall+5>:   sysenter
    //   0xf7757427 <__kernel_vsyscall+7>:   nop
    //   0xf7757428 <__kernel_vsyscall+8>:   nop
    //   0xf7757429 <__kernel_vsyscall+9>:   nop
    //   0xf775742a <__kernel_vsyscall+10>:  nop
    //   0xf775742b <__kernel_vsyscall+11>:  nop
    //   0xf775742c <__kernel_vsyscall+12>:  nop
    //   0xf775742d <__kernel_vsyscall+13>:  nop
    //   0xf775742e <__kernel_vsyscall+14>:  int   $0x80
    //   0xf7757430 <__kernel_vsyscall+16>:  pop   %ebp
    //   0xf7757431 <__kernel_vsyscall+17>:  pop   %edx
    //   0xf7757432 <__kernel_vsyscall+18>:  pop   %ecx
    //   0xf7757433 <__kernel_vsyscall+19>:  ret
    //
    // In cases where the sampled thread is blocked in a syscall, its
    // program counter will point at "pop %ebp".  Hence we look for
    // the sequence "int $0x80; pop %ebp; pop %edx; pop %ecx; ret", and
    // the corresponding register-recovery actions are:
    //    new_ebp = *(old_esp + 0)
    //    new_eip = *(old_esp + 12)
    //    new_esp = old_esp + 16
    //
    // It may also be the case that the program counter points two
    // nops before the "int $0x80", viz, is __kernel_vsyscall+12, in
    // the case where the syscall has been restarted but the thread
    // hasn't been rescheduled.  The code below doesn't handle that;
    // it could easily be made to.
    //
    if (!ruleset && *aFramesUsed == 1 && ia.Valid() && sp.Valid()) {
      uintptr_t insns_min, insns_max;
      uintptr_t eip = ia.Value();
      bool b = mSegArray->getBoundingCodeSegment(&insns_min, &insns_max, eip);
      if (b && eip - 2 >= insns_min && eip + 3 <= insns_max) {
        uint8_t* eipC = (uint8_t*)eip;
        if (eipC[-2] == 0xCD && eipC[-1] == 0x80 && eipC[0] == 0x5D &&
            eipC[1] == 0x5A && eipC[2] == 0x59 && eipC[3] == 0xC3) {
          TaggedUWord sp_plus_0 = sp;
          TaggedUWord sp_plus_12 = sp;
          TaggedUWord sp_plus_16 = sp;
          sp_plus_12 = sp_plus_12 + TaggedUWord(12);
          sp_plus_16 = sp_plus_16 + TaggedUWord(16);
          TaggedUWord new_ebp = DerefTUW(sp_plus_0, aStackImg);
          TaggedUWord new_eip = DerefTUW(sp_plus_12, aStackImg);
          TaggedUWord new_esp = sp_plus_16;
          if (new_ebp.Valid() && new_eip.Valid() && new_esp.Valid()) {
            regs.xbp = new_ebp;
            regs.xip = new_eip;
            regs.xsp = new_esp;
            continue;
          }
        }
      }
    }
    ////
    /////////////////////////////////////////////
#endif  // defined(GP_PLAT_x86_android) || defined(GP_PLAT_x86_linux)
    // So, do we have a ruleset for this address?  If so, use it now.
    if (ruleset) {
      if (DEBUG_MAIN) {
        ruleset->Print(ia.Value(), 1 /*bogus, but doesn't matter*/, mLog);
        mLog("\n");
      }
      // Use the RuleSet to compute the registers for the previous
      // frame.  |regs| is modified in-place.
      UseRuleSet(&regs, aStackImg, ruleset, pfxinstrs);
      continue;
    }
#if defined(GP_PLAT_amd64_linux) || defined(GP_PLAT_x86_linux) ||     \
    defined(GP_PLAT_amd64_android) || defined(GP_PLAT_x86_android) || \
    defined(GP_PLAT_amd64_freebsd)
    // There's no RuleSet for the specified address.  On amd64/x86_linux, see
    // if it's possible to recover the caller's frame by using the frame
    // pointer.

    // We seek to compute (new_IP, new_SP, new_BP) from (old_BP, stack image),
    // and assume the following layout:
    //
    //                 <--- new_SP
    //   +----------+
    //   |  new_IP  |  (return address)
    //   +----------+
    //   |  new_BP  |  <--- old_BP
    //   +----------+
    //   |   ....   |
    //   |   ....   |
    //   |   ....   |
    //   +----------+  <---- old_SP (arbitrary, but must be <= old_BP)

    const size_t wordSzB = sizeof(uintptr_t);
    TaggedUWord old_xsp = regs.xsp;

    // points at new_BP ?
    TaggedUWord old_xbp = regs.xbp;
    // points at new_IP ?
    TaggedUWord old_xbp_plus1 = regs.xbp + TaggedUWord(1 * wordSzB);
    // is the new_SP ?
    TaggedUWord old_xbp_plus2 = regs.xbp + TaggedUWord(2 * wordSzB);

    if (old_xbp.Valid() && old_xbp.IsAligned() && old_xsp.Valid() &&
        old_xsp.IsAligned() && old_xsp.Value() <= old_xbp.Value()) {
      // We don't need to do any range, alignment or validity checks for
      // addresses passed to DerefTUW, since that performs them itself, and
      // returns an invalid value on failure.  Any such value will poison
      // subsequent uses, and we do a final check for validity before putting
      // the computed values into |regs|.
      TaggedUWord new_xbp = DerefTUW(old_xbp, aStackImg);
      if (new_xbp.Valid() && new_xbp.IsAligned() &&
          old_xbp.Value() < new_xbp.Value()) {
        TaggedUWord new_xip = DerefTUW(old_xbp_plus1, aStackImg);
        TaggedUWord new_xsp = old_xbp_plus2;
        if (new_xbp.Valid() && new_xip.Valid() && new_xsp.Valid()) {
          regs.xbp = new_xbp;
          regs.xip = new_xip;
          regs.xsp = new_xsp;
          (*aFramePointerFramesAcquired)++;
          continue;
        }
      }
    }
#elif defined(GP_ARCH_arm64)
    // Here is an example of generated code for prologue and epilogue:
    //
    //   stp     x29, x30, [sp, #-16]!
    //   mov     x29, sp
    //   ...
    //   ldp     x29, x30, [sp], #16
    //   ret
    //
    // Next is another example of generated code:
    //
    //   stp     x20, x19, [sp, #-32]!
    //   stp     x29, x30, [sp, #16]
    //   add     x29, sp, #0x10
    //   ...
    //   ldp     x29, x30, [sp, #16]
    //   ldp     x20, x19, [sp], #32
    //   ret
    //
    // The previous x29 and x30 registers are stored at the address held in
    // the x29 register.  But since the sp register's value depends on local
    // variables, we cannot compute the previous sp register from the current
    // sp/fp/lr registers, and there is no regular rule for the sp register
    // in the prologue.  But since the return address is the lr register, if
    // x29 is valid, we can recover the return address without the sp
    // register.
    //
    // So, if there is no rule set, we assume the following layout.  x29 is
    // the frame pointer, so we will be able to compute x29 and x30.
    //
    //   +----------+  <--- new_sp (cannot compute)
    //   |   ....   |
    //   +----------+
    //   |  new_lr  |  (return address)
    //   +----------+
    //   |  new_fp  |  <--- old_fp
    //   +----------+
    //   |   ....   |
    //   |   ....   |
    //   +----------+  <---- old_sp (arbitrary, but unused)
    TaggedUWord old_fp = regs.x29;
    if (old_fp.Valid() && old_fp.IsAligned() && last_valid_sp.Valid() &&
        last_valid_sp.Value() <= old_fp.Value()) {
      TaggedUWord new_fp = DerefTUW(old_fp, aStackImg);
      if (new_fp.Valid() && new_fp.IsAligned() &&
          old_fp.Value() < new_fp.Value()) {
        TaggedUWord old_fp_plus1 = old_fp + TaggedUWord(8);
        TaggedUWord new_lr = DerefTUW(old_fp_plus1, aStackImg);
        if (new_lr.Valid()) {
          regs.x29 = new_fp;
          regs.x30 = new_lr;
          // When using frame pointer to walk stack, we cannot compute sp
          // register since we cannot compute sp register from fp/lr/sp
          // register, and there is no regular rule to compute previous sp
          // register.  So mark as invalid.
          regs.sp = TaggedUWord();
          (*aFramePointerFramesAcquired)++;
          continue;
        }
      }
    }
#endif  // defined(GP_PLAT_amd64_linux) || defined(GP_PLAT_x86_linux) ||
        // defined(GP_PLAT_amd64_android) || defined(GP_PLAT_x86_android) ||
        // defined(GP_PLAT_amd64_freebsd)
    // We failed to recover a frame either using CFI or FP chasing, and we
    // have no other ways to recover the frame.  So we have to give up.
    break;

  }  // top level unwind loop

  // END UNWIND /////////////////////////////////////////////////////////
}
////////////////////////////////////////////////////////////////
// LUL Unit Testing                                            //
////////////////////////////////////////////////////////////////
  // if (0) {
  //   // Show what we have.
  //   fprintf(stderr, "Got %d frames:\n", (int)framesUsed);
  //   for (size_t i = 0; i < framesUsed; i++) {
  //     fprintf(stderr, "  [%2d]   SP %p   PC %p\n",
  //             (int)i, (void*)frameSPs[i], (void*)framePCs[i]);
  //   }
  //   fprintf(stderr, "\n");
  // }
  // Check to see if there's a consistent binding between digits in
  // the director string ('1' .. '8') and the PC values acquired by
  // the unwind.  If there isn't, the unwinding has failed somehow.
uintptr_t binding[8]; // binding for '1' .. binding for '8'
memset((void*)binding, 0, sizeof(binding));
  // The general plan is to work backwards along the director string
  // and forwards along the framePCs array.  Doing so corresponds to
  // working outwards from the innermost frame of the recursive test set.
  const char* cursor = dstring;

  // Find the end.  This leaves |cursor| two bytes past the first
  // character we want to look at -- see comment below.
  while (*cursor) cursor++;
// Counts the number of consistent frames.
size_t nConsistent = 0;
  // Iterate back to the start of the director string.  The starting
  // points are a bit complex.  We can't use framePCs[0] because that
  // contains the PC in this frame (above).  We can't use framePCs[1]
  // because that will contain the PC at return point in the recursive
  // test group (TestFn[1-8]) for their call "out" to this function,
  // GetAndCheckStackTrace.  Although LUL will compute a correct
  // return address, that will not be the same return address as for a
  // recursive call out of the function to another function in the
  // group.  Hence we can only start consistency checking at
  // framePCs[2].
  //
  // To be consistent, then, we must ignore the last element in the
  // director string as that corresponds to framePCs[1].  Hence the
  // start points are: framePCs[2] and the director string 2 bytes
  // before the terminating zero.
  //
  // Also as a result of this, the number of consistent frames counted
  // will always be one less than the length of the director string
  // (not including its terminating zero).
  size_t frameIx;
  for (cursor = cursor - 2, frameIx = 2;
       cursor >= dstring && frameIx < framesUsed; cursor--, frameIx++) {
    char c = *cursor;
    uintptr_t pc = framePCs[frameIx];
    // If this doesn't hold, the director string is ill-formed.
    MOZ_ASSERT(c >= '1' && c <= '8');
    int n = ((int)c) - ((int)'1');
    if (binding[n] == 0) {
      // There's no binding for |c| yet, so install |pc| and carry on.
      binding[n] = pc;
      nConsistent++;
      continue;
    }
    // There's a pre-existing binding for |c|.  Check it's consistent.
    if (binding[n] != pc) {
      // Not consistent.  Give up now.
      break;
    }
    // Consistent.  Keep going.
    nConsistent++;
  }
  // So, did we succeed?
  bool passed = nConsistent + 1 == strlen(dstring);
// Macro magic to create a set of 8 mutually recursive functions with
// varying frame sizes.  These will recurse amongst themselves as
// specified by |strP|, the director string, and call
// GetAndCheckStackTrace when the string becomes empty, passing it the
// original value of the string.  This checks the result, printing
// results on |aLUL|'s logging sink, and also returns a boolean
// indicating whether or not the results are acceptable (correct).
#define GEN_TEST_FN(NAME, FRAMESIZE)                                          \
  bool NAME(LUL* aLUL, const char* strPorig, const char* strP) {              \
    /* Create a frame of size (at least) FRAMESIZE, so that the */            \
    /* 8 functions created by this macro offer some variation in frame */     \
    /* sizes.  This isn't as simple as it might seem, since a clever */       \
    /* optimizing compiler (eg, clang-5) detects that the array is unused */  \
    /* and removes it.  We try to defeat this by passing it to a function */  \
    /* in a different compilation unit, and hoping that clang does not */     \
    /* notice that the call is a no-op. */                                    \
    char space[FRAMESIZE];                                                    \
    Unused << write(1, space, 0); /* write zero bytes of |space| to stdout */ \
                                                                              \
    if (*strP == '\0') {                                                      \
      /* We've come to the end of the director string. */                     \
      /* Take a stack snapshot. */                                            \
      /* We purposefully use a negation to avoid tail-call optimization */    \
      return !GetAndCheckStackTrace(aLUL, strPorig);                          \