SkRTree::Node* SkRTree::allocateNodeAtLevel(uint16_t level) {
    // Hand out a fresh node from the preallocated pool.
    SkDEBUGCODE(Node* poolStart = fNodes.data());
    fNodes.push_back(Node{});
    Node& node = fNodes.back();
    // The pool must never reallocate, or previously returned Node* would dangle.
    SkASSERT(fNodes.data() == poolStart); // If this fails, we didn't reserve() enough.
    node.fLevel = level;
    node.fNumChildren = 0;
    return &node;
}
// This function parallels bulkLoad, but just counts how many nodes bulkLoad would allocate. int SkRTree::CountNodes(int branches) { if (branches == 1) { return 1;
} int remainder = branches % kMaxChildren; if (remainder > 0) { if (remainder >= kMinChildren) {
remainder = 0;
} else {
remainder = kMinChildren - remainder;
}
} int currentBranch = 0; int nodes = 0; while (currentBranch < branches) { int incrementBy = kMaxChildren; if (remainder != 0) { if (remainder <= kMaxChildren - kMinChildren) {
incrementBy -= remainder;
remainder = 0;
} else {
incrementBy = kMinChildren;
remainder -= kMaxChildren - kMinChildren;
}
}
nodes++;
currentBranch++; for (int k = 1; k < incrementBy && currentBranch < branches; ++k) {
currentBranch++;
}
} return nodes + CountNodes(nodes);
}
SkRTree::Branch SkRTree::bulkLoad(std::vector<Branch>* branches, int level) {
    // A single branch is the root; the recursion bottoms out here.
    if (branches->size() == 1) {
        return (*branches)[0];
    }

    // We might sort our branches here, but we expect Blink gives us a reasonable
    // x,y order. Skipping a call to sort (in Y) here resulted in a 17% win for
    // recording with negligible difference in playback speed.

    // 'shortBy' is how many child slots must be left empty across this level so
    // that no node ends up with fewer than kMinChildren.
    int shortBy = (int)branches->size() % kMaxChildren;
    if (shortBy > 0) {
        if (shortBy >= kMinChildren) {
            shortBy = 0;
        } else {
            shortBy = kMinChildren - shortBy;
        }
    }

    int read  = 0;  // next input branch to pack into a node
    int write = 0;  // next level's branches are compacted in place at the front
    while (read < (int)branches->size()) {
        // Decide how many children this node takes, omitting slots if need be.
        int take = kMaxChildren;
        if (shortBy != 0) {
            if (shortBy <= kMaxChildren - kMinChildren) {
                take -= shortBy;
                shortBy = 0;
            } else {
                take = kMinChildren;
                shortBy -= kMaxChildren - kMinChildren;
            }
        }

        // Pack up to 'take' children into a new node, accumulating its bounds.
        Node* node = this->allocateNodeAtLevel(level);
        Branch parent;
        parent.fSubtree = node;
        for (int k = 0; k < take && read < (int)branches->size(); ++k) {
            const Branch& child = (*branches)[read];
            if (k == 0) {
                parent.fBounds = child.fBounds;
            } else {
                parent.fBounds.join(child.fBounds);
            }
            node->fChildren[k] = child;
            ++node->fNumChildren;
            ++read;
        }
        (*branches)[write] = parent;
        ++write;
    }
    branches->resize(write);
    return this->bulkLoad(branches, level + 1);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.