staticinline bool _presplit_subtables_if_needed (graph::gsubgpos_graph_context_t& ext_context)
{ // For each lookup this will check the size of subtables and split them as needed // so that no subtable is at risk of overflowing. (where we support splitting for // that subtable type). // // TODO(grieger): de-dup newly added nodes as necessary. Probably just want a full de-dup // pass after this processing is done. Not super necessary as splits are // only done where overflow is likely, so de-dup probably will get undone // later anyways.
// The loop below can modify the contents of ext_context.lookups if new subtables are added // to a lookup during a split. So save the initial set of lookup indices so the iteration doesn't // risk access free'd memory if ext_context.lookups gets resized.
hb_set_t lookup_indices(ext_context.lookups.keys ()); for (unsigned lookup_index : lookup_indices)
{
graph::Lookup* lookup = ext_context.lookups.get(lookup_index); if (!lookup->split_subtables_if_needed (ext_context, lookup_index)) returnfalse;
}
returntrue;
}
// NOTE(review): extraction artifact — this function body appears truncated: the
// declarations of `lookup_sizes`, `l3_l4_size` and `l4_plus_size`, and the tail
// of the promotion loop below, are not visible in this chunk. Code is left
// byte-identical; only comments were added. Verify against upstream before use.
/* * Analyze the lookups in a GSUB/GPOS table and decide if any should be promoted * to extension lookups.
*/ staticinline bool _promote_extensions_if_needed (graph::gsubgpos_graph_context_t& ext_context)
{ // Simple Algorithm (v1, current): // 1. Calculate how many bytes each non-extension lookup consumes. // 2. Select up to 64k of those to remain as non-extension (greedy, highest subtables per byte first) // 3. Promote the rest. // // Advanced Algorithm (v2, not implemented): // 1. Perform connected component analysis using lookups as roots. // 2. Compute size of each connected component. // 3. Select up to 64k worth of connected components to remain as non-extensions. // (greedy, highest subtables per byte first) // 4. Promote the rest.
// No lookups at all: trivially nothing to promote.
// TODO(garretrieger): support extension demotion, then consider all lookups. Requires advanced algo. // TODO(garretrieger): also support extension promotion during iterative resolution phase, then // we can use a less conservative threshold here. // TODO(grieger): skip this for the 24 bit case. if (!ext_context.lookups) returntrue;
// NOTE(review): `lookup_sizes` is presumably built from ext_context.lookups in
// lines lost by the extraction — TODO confirm.
// Start by assuming all lookups are using extension subtables, this size will be removed later // if it's decided to not make a lookup extension. for (auto p : lookup_sizes)
// NOTE(review): the `* 8` presumably accounts for the per-subtable extension
// record size implied by the comment above — TODO confirm.
{ // TODO(garretrieger): this overestimates the extension subtables size because some extension subtables may be // reused. However, we can't correct this until we have connected component analysis in place. unsigned subtables_size = p.num_subtables * 8;
l3_l4_size += subtables_size;
l4_plus_size += subtables_size;
}
// Second pass: revisit lookups that are not yet extensions (extensions were
// already counted by the loop above).
bool layers_full = false; for (auto p : lookup_sizes)
{ const graph::Lookup* lookup = ext_context.lookups.get(p.lookup_index); if (lookup->is_extension (ext_context.table_tag)) // already an extension so size is counted by the loop above. continue;
// NOTE(review): fragment — the enclosing function's signature is not visible in
// this chunk; from the space/root manipulation this is presumably the body of a
// subgraph-isolation pass (`space` and `roots_to_isolate` are declared in lines
// lost by the extraction). Code left byte-identical; comments only.
for (int i = overflows.length - 1; i >= 0; i--)
{ const graph::overflow_record_t& r = overflows[i];
// Skip parents not in any space, and spaces with a single root (nothing can
// be moved out of those).
unsigned root; unsigned overflow_space = sorted_graph.space_for (r.parent, &root); if (!overflow_space) continue; if (sorted_graph.num_roots_for_space (overflow_space) <= 1) continue;
// Lock onto the first overflowing space encountered; roots belonging to other
// spaces are ignored this round.
if (!space) {
space = overflow_space;
}
if (space == overflow_space)
roots_to_isolate.add(root);
}
// No candidate roots found: report no progress to the caller.
if (!roots_to_isolate) returnfalse;
// Cap how many roots are moved this round (at most half the roots in the
// space, at least one), trimming excess entries via hb_set_t::previous from
// HB_SET_VALUE_INVALID — presumably removing the largest values first; TODO confirm.
unsigned maximum_to_move = hb_max ((sorted_graph.num_roots_for_space (space) / 2u), 1u); if (roots_to_isolate.get_population () > maximum_to_move) { // Only move at most half of the roots in a space at a time. unsigned extra = roots_to_isolate.get_population () - maximum_to_move; while (extra--) {
uint32_t root = HB_SET_VALUE_INVALID;
roots_to_isolate.previous (&root);
roots_to_isolate.del (root);
}
}
DEBUG_MSG (SUBSET_REPACK, nullptr, "Overflow in space %u (%u roots). Moving %u roots to space %u.",
space,
sorted_graph.num_roots_for_space (space),
roots_to_isolate.get_population (),
sorted_graph.next_space ());
// Attempts to resolve the overflow at overflows[overflow_index] by duplicating
// the shared child node and re-pointing the overflowing parents at the copy.
// NOTE(review): extraction artifact — the function's final return statement and
// closing brace are not visible in this chunk. Code left byte-identical.
staticinline bool _resolve_shared_overflow(const hb_vector_t<graph::overflow_record_t>& overflows, int overflow_index,
graph_t& sorted_graph)
{ const graph::overflow_record_t& r = overflows[overflow_index];
// Find all of the parents in overflowing links that link to this // same child node. We will then try duplicating the child node and // re-assigning all of these parents to the duplicate.
hb_set_t parents;
parents.add(r.parent); for (int i = overflow_index - 1; i >= 0; i--) { const graph::overflow_record_t& r2 = overflows[i]; if (r2.child == r.child) {
parents.add(r2.parent);
}
}
// NOTE(review): duplicate() presumably returns (unsigned)-1 on failure — the
// retry below drops the parent closest to the child and tries again. TODO confirm.
unsigned result = sorted_graph.duplicate(&parents, r.child); if (result == (unsigned) -1 && parents.get_population() > 2) { // All links to the child are overflowing, so we can't include all // in the duplication. Remove one parent from the duplication. // Remove the lowest index parent, which will be the closest to the child.
parents.del(parents.get_min());
result = sorted_graph.duplicate(&parents, r.child);
}
// NOTE(review): `result` equals (unsigned)-1 here, which converts to bool
// `true` — this looks like it should be `return false;` to signal failure.
// Verify against upstream and the caller's `if (!_resolve_shared_overflow...)`.
if (result == (unsigned) -1) return result;
if (parents.get_population() > 1) { // If the duplicated node has more than one parent pre-emptively raise it's priority to the maximum. // This will place it close to the parents. Node's with only one parent, don't need this as normal overflow // resolution will raise priority if needed. // // Reasoning: most of the parents to this child are likely at the same layer in the graph. Duplicating // the child will theoretically allow it to be placed closer to it's parents. However, due to the shortest // distance sort by default it's placement will remain in the same layer, thus it will remain in roughly the // same position (and distance from parents) as the original child node. The overflow resolution will attempt // to move nodes closer, but only for non-shared nodes. Since this node is shared, it will simply be given // further duplication which defeats the attempt to duplicate with multiple parents. To fix this we // pre-emptively raise priority now which allows the duplicated node to pack into the same layer as it's parents.
sorted_graph.vertices_[result].give_max_priority();
}
// NOTE(review): fragment — the enclosing function's signature is not visible
// (presumably an overflow-processing pass: `resolution_attempted` and
// `priority_bumped_parents` are declared in lines lost by the extraction).
// The extension-promotion lines near the end appear spliced in from a
// different function by the extraction. Code left byte-identical; comments only.
// Try resolving the furthest overflows first. for (int i = overflows.length - 1; i >= 0; i--)
{ const graph::overflow_record_t& r = overflows[i]; constauto& child = sorted_graph.vertices_[r.child]; if (child.is_shared ())
{ // The child object is shared, we may be able to eliminate the overflow // by duplicating it. if (!_resolve_shared_overflow(overflows, i, sorted_graph)) continue; returntrue;
}
// Only bump a given parent's priority once per pass, and only for leaf
// children (see TODOs below).
if (child.is_leaf () && !priority_bumped_parents.has (r.parent))
{ // This object is too far from it's parent, attempt to move it closer. // // TODO(garretrieger): initially limiting this to leaf's since they can be // moved closer with fewer consequences. However, this can // likely can be used for non-leafs as well. // TODO(garretrieger): also try lowering priority of the parent. Make it // get placed further up in the ordering, closer to it's children. // this is probably preferable if the total size of the parent object // is < then the total size of the children (and the parent can be moved). // Since in that case moving the parent will cause a smaller increase in // the length of other offsets. if (sorted_graph.raise_childrens_priority (r.parent)) {
priority_bumped_parents.add (r.parent);
resolution_attempted = true;
} continue;
}
// NOTE(review): the two DEBUG_MSG lines below presumably belong to the
// GSUB/GPOS setup path of a different function — extraction splice; TODO confirm.
DEBUG_MSG (SUBSET_REPACK, nullptr, "Promoting lookups to extensions if needed."); if (!_promote_extensions_if_needed (ext_context)) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "Extensions promotion failed."); returnfalse;
}
}
// NOTE(review): fragment of the main overflow-resolution driver (its signature
// is not visible in this chunk; `max_rounds`, `ext_context`, `is_gsub_or_gpos`,
// `always_recalculate_extensions` and `table_tag` come from lines lost by the
// extraction). Code left byte-identical; comments only.
DEBUG_MSG (SUBSET_REPACK, nullptr, "Assigning spaces to 32 bit subgraphs."); if (sorted_graph.assign_spaces ())
sorted_graph.sort_shortest_distance (); else
sorted_graph.sort_shortest_distance_if_needed ();
}
// Iterate: while the graph still overflows and we have rounds left, try
// isolation first, then per-overflow resolution, re-sorting after each round.
unsigned round = 0;
hb_vector_t<graph::overflow_record_t> overflows; // TODO(garretrieger): select a good limit for max rounds. while (!sorted_graph.in_error ()
&& graph::will_overflow (sorted_graph, &overflows)
&& round < max_rounds) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "=== Overflow resolution round %u ===", round);
print_overflows (sorted_graph, overflows);
hb_set_t priority_bumped_parents;
if (!_try_isolating_subgraphs (overflows, sorted_graph))
{ // Don't count space isolation towards round limit. Only increment // round counter if space isolation made no changes.
round++; if (!_process_overflows (overflows, priority_bumped_parents, sorted_graph))
{
DEBUG_MSG (SUBSET_REPACK, nullptr, "No resolution available :("); break;
}
}
sorted_graph.sort_shortest_distance ();
}
if (sorted_graph.in_error ())
{
DEBUG_MSG (SUBSET_REPACK, nullptr, "Sorted graph in error state."); returnfalse;
}
// Still overflowing after all rounds: for GSUB/GPOS, retry once with
// extension promotion and table splitting enabled as a last resort.
if (graph::will_overflow (sorted_graph))
{ if (is_gsub_or_gpos && !always_recalculate_extensions) { // If this a GSUB/GPOS table and we didn't try to extension promotion and table splitting then // as a last ditch effort, re-run the repacker with it enabled.
DEBUG_MSG (SUBSET_REPACK, nullptr, "Failed to find a resolution. Re-running with extension promotion and table splitting enabled."); return hb_resolve_graph_overflows (table_tag, max_rounds, true, sorted_graph);
}
/*
 * Attempts to modify the topological sorting of the provided object graph to
 * eliminate offset overflows in the links between objects of the graph. If a
 * non-overflowing ordering is found the updated graph is serialized it into the
 * provided serialization context.
 *
 * If necessary the structure of the graph may be modified in ways that do not
 * affect the functionality of the graph. For example shared objects may be
 * duplicated.
 *
 * For a detailed writeup describing how the algorithm operates see:
 * docs/repacker.md
 *
 * Returns the serialized blob on success, nullptr on any failure (invalid or
 * disconnected graph, allocation error, or unresolved overflows).
 */
template<typename T>
inline hb_blob_t*
hb_resolve_overflows (const T& packed,
                      hb_tag_t table_tag,
                      unsigned max_rounds = 32,
                      bool recalculate_extensions = false) {
  graph_t sorted_graph (packed);
  if (sorted_graph.in_error ())
  {
    // Invalid graph definition.
    return nullptr;
  }

  if (!sorted_graph.is_fully_connected ())
  {
    sorted_graph.print_orphaned_nodes ();
    return nullptr;
  }

  if (sorted_graph.in_error ())
  {
    // Allocations failed somewhere
    DEBUG_MSG (SUBSET_REPACK, nullptr,
               "Graph is in error, likely due to a memory allocation error.");
    return nullptr;
  }

  if (!hb_resolve_graph_overflows (table_tag, max_rounds, recalculate_extensions, sorted_graph))
    return nullptr;

  return graph::serialize (sorted_graph);
}
#endif /* HB_REPACKER_HH */
/*
 * NOTE(review): the text below is footer residue from the web viewer this file
 * was extracted from (a processing-time measurement and a German disclaimer).
 * It is not part of the original source; it is preserved here inside a comment
 * so the header remains well-formed:
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.1 Sekunden (vorverarbeitet)
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */