/* Duplicated here from hb-machinery.hh to avoid including it. */
/* Reads a Type located `offset` bytes past P, without requiring that the
 * address be aligned for Type; the cast-align warning is suppressed on
 * purpose since the unaligned access is intentional. */
template<typename Type>
static inline const Type& StructAtOffsetUnaligned(const void *P, unsigned int offset)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
  return * reinterpret_cast<const Type*> ((const char *) P + offset);
#pragma GCC diagnostic pop
}
template <typename T> void set_array (bool v, const T *array, unsignedint count, unsignedint stride=sizeof(T))
{ if (unlikely (!successful)) return; if (!count) return;
dirty ();
hb_codepoint_t g = *array; while (count)
{ unsignedint m = get_major (g);
page_t *page = page_for (g, v); if (unlikely (v && !page)) return; unsignedint start = major_start (m); unsignedint end = major_start (m + 1); do
{ if (g != INVALID && (v || page)) /* The v check is to optimize out the page check if v is true. */
page->set (g, v);
array = &StructAtOffsetUnaligned<T> (array, stride);
count--;
} while (count && (g = *array, start <= g && g < end));
}
}
/* Might return false if array looks unsorted.
* Used for faster rejection of corrupt data. */ template <typename T> bool set_sorted_array (bool v, const T *array, unsignedint count, unsignedint stride=sizeof(T))
{ if (unlikely (!successful)) returntrue; /* https://github.com/harfbuzz/harfbuzz/issues/657 */ if (unlikely (!count)) returntrue;
dirty ();
hb_codepoint_t g = *array;
hb_codepoint_t last_g = g; while (count)
{ unsignedint m = get_major (g);
page_t *page = page_for (g, v); if (unlikely (v && !page)) returnfalse; unsignedint end = major_start (m + 1); do
{ /* If we try harder we can change the following comparison to <=;
* Not sure if it's worth it. */ if (g < last_g) returnfalse;
last_g = g;
if (g != INVALID && (v || page)) /* The v check is to optimize out the page check if v is true. */
page->add (g);
array = &StructAtOffsetUnaligned<T> (array, stride);
count--;
} while (count && (g = *array, g < end));
} returntrue;
}
/* Removes a single codepoint from the set.  No-op if the set is in an
 * error state or if g's page does not exist (value already absent). */
void del (hb_codepoint_t g)
{
  if (unlikely (!successful)) return;

  page_t *page = page_for (g);
  if (!page)
    return;

  dirty ();

  page->del (g);
}
private:
/* Drops every whole page whose major number lies in [ds, de] (inclusive),
 * then compacts the pages array so surviving page-map entries stay valid. */
void del_pages (int ds, int de)
{
  if (ds <= de)
  {
    // Pre-allocate the workspace that compact() will need so we can bail on
    // allocation failure before attempting to rewrite the page map.
    hb_vector_t<unsigned> compact_workspace;
    if (unlikely (!allocate_compact_workspace (compact_workspace))) return;

    unsigned int write_index = 0;
    for (unsigned int i = 0; i < page_map.length; i++)
    {
      int m = (int) page_map[i].major;
      /* Keep only entries outside the deleted major range. */
      if (m < ds || de < m)
	page_map[write_index++] = page_map[i];
    }
    compact (compact_workspace, write_index);
    resize (write_index);
  }
}
public:
/* Removes the inclusive codepoint range [a, b]: partial first/last pages are
 * trimmed in place; pages fully covered by the range are deleted outright. */
void del_range (hb_codepoint_t a, hb_codepoint_t b)
{
  if (unlikely (!successful)) return;
  if (unlikely (a > b || a == INVALID)) return;

  dirty ();

  unsigned int ma = get_major (a);
  unsigned int mb = get_major (b);

  /* Delete pages from ds through de if ds <= de. */
  int ds = (a == major_start (ma))? (int) ma: (int) (ma + 1);
  int de = (b + 1 == major_start (mb + 1))? (int) mb: ((int) mb - 1);

  /* a's page is only partially covered; trim it. */
  if (ds > de || (int) ma < ds)
  {
    page_t *page = page_for (a);
    if (page)
    {
      if (ma == mb)
	page->del_range (a, b);
      else
	page->del_range (a, major_start (ma + 1) - 1);
    }
  }

  /* b's page is only partially covered; trim it. */
  if (de < (int) mb && ma != mb)
  {
    page_t *page = page_for (b);
    if (page)
      page->del_range (major_start (mb), b);
  }

  del_pages (ds, de);
}
/* Returns true iff codepoint g is a member of the set. */
bool get (hb_codepoint_t g) const
{
  const page_t *page = page_for (g);
  if (!page)
    return false;
  return page->get (g);
}
/* Has interface. */ booloperator [] (hb_codepoint_t k) const { return get (k); } bool has (hb_codepoint_t k) const { return (*this)[k]; } /* Predicate. */ booloperator () (hb_codepoint_t k) const { return has (k); }
/* Returns true iff the set contains any value in [first, last] (inclusive):
 * the next member after first-1 must exist and be <= last. */
bool intersects (hb_codepoint_t first, hb_codepoint_t last) const
{
  hb_codepoint_t c = first - 1;
  return next (&c) && c <= last;
}

/* Makes this set an exact copy of `other` (page map, pages, and cached
 * population).  No-op on error state or if resizing fails. */
void set (const hb_bit_set_t &other, bool exact_size = false)
{
  if (unlikely (!successful)) return;

  unsigned int count = other.pages.length;
  if (unlikely (!resize (count, false, exact_size)))
    return;

  population = other.population;

  page_map = other.page_map;
  pages = other.pages;
}
bool is_equal (const hb_bit_set_t &other) const
{ if (has_population () && other.has_population () &&
population != other.population) returnfalse;
unsignedint na = pages.length; unsignedint nb = other.pages.length;
unsignedint a = 0, b = 0; for (; a < na && b < nb; )
{ if (page_at (a).is_empty ()) { a++; continue; } if (other.page_at (b).is_empty ()) { b++; continue; } if (page_map[a].major != other.page_map[b].major ||
!page_at (a).is_equal (other.page_at (b))) returnfalse;
a++;
b++;
} for (; a < na; a++) if (!page_at (a).is_empty ()) { returnfalse; } for (; b < nb; b++) if (!other.page_at (b).is_empty ()) { returnfalse; }
returntrue;
}
bool is_subset (const hb_bit_set_t &larger_set) const
{ if (has_population () && larger_set.has_population () &&
population > larger_set.population) returnfalse;
unsignedint na = pages.length; unsignedint nb = other.pages.length; unsignedint next_page = na;
unsignedint count = 0, newCount = 0; unsignedint a = 0, b = 0; unsignedint write_index = 0;
// Pre-allocate the workspace that compact() will need so we can bail on allocation failure // before attempting to rewrite the page map.
hb_vector_t<unsigned> compact_workspace; if (!passthru_left && unlikely (!allocate_compact_workspace (compact_workspace))) return;
for (; a < na && b < nb; )
{ if (page_map[a].major == other.page_map[b].major)
{ if (!passthru_left)
{ // Move page_map entries that we're keeping from the left side set // to the front of the page_map vector. This isn't necessary if // passthru_left is set since no left side pages will be removed // in that case. if (write_index < a)
page_map[write_index] = page_map[a];
write_index++;
}
count++;
a++;
b++;
} elseif (page_map[a].major < other.page_map[b].major)
{ if (passthru_left)
count++;
a++;
} else
{ if (passthru_right)
count++;
b++;
}
} if (passthru_left)
count += na - a; if (passthru_right)
count += nb - b;
if (!passthru_left)
{
na = write_index;
next_page = write_index;
compact (compact_workspace, write_index);
}
if (unlikely (!resize (count))) return;
newCount = count;
/* Process in-place backward. */
a = na;
b = nb; for (; a && b; )
{ if (page_map.arrayZ[a - 1].major == other.page_map.arrayZ[b - 1].major)
{
a--;
b--;
count--;
page_map.arrayZ[count] = page_map.arrayZ[a];
page_at (count).v = op (page_at (a).v, other.page_at (b).v);
page_at (count).dirty ();
} elseif (page_map.arrayZ[a - 1].major > other.page_map.arrayZ[b - 1].major)
{
a--; if (passthru_left)
{
count--;
page_map.arrayZ[count] = page_map.arrayZ[a];
}
} else
{
b--; if (passthru_right)
{
count--;
page_map.arrayZ[count].major = other.page_map.arrayZ[b].major;
page_map.arrayZ[count].index = next_page++;
page_at (count) = other.page_at (b);
}
}
} if (passthru_left) while (a)
{
a--;
count--;
page_map.arrayZ[count] = page_map.arrayZ[a];
} if (passthru_right) while (b)
{
b--;
count--;
page_map.arrayZ[count].major = other.page_map.arrayZ[b].major;
page_map.arrayZ[count].index = next_page++;
page_at (count) = other.page_at (b);
}
assert (!count);
resize (newCount);
} template <typename Op> static hb_bit_page_t::vector_t
op_ (const hb_bit_page_t::vector_t &a, const hb_bit_page_t::vector_t &b)
{ return Op{} (a, b); } template <typename Op> void process (const Op& op, const hb_bit_set_t &other)
{
process_ (op_<Op>, op (1, 0), op (0, 1), other);
}
void union_ (const hb_bit_set_t &other) { process (hb_bitwise_or, other); } void intersect (const hb_bit_set_t &other) { process (hb_bitwise_and, other); } void subtract (const hb_bit_set_t &other) { process (hb_bitwise_gt, other); } void symmetric_difference (const hb_bit_set_t &other) { process (hb_bitwise_xor, other); }
i = *first; if (!previous (&i))
{
*last = *first = INVALID; returnfalse;
}
/* TODO Speed up. */
*last = *first = i; while (previous (&i) && i == *first - 1)
(*first)--;
returntrue;
}
unsignedint next_many (hb_codepoint_t codepoint,
hb_codepoint_t *out, unsignedint size) const
{ // By default, start at the first bit of the first page of values. unsignedint start_page = 0; unsignedint start_page_value = 0; if (unlikely (codepoint != INVALID))
{ constauto* page_map_array = page_map.arrayZ; unsignedint major = get_major (codepoint); unsignedint i = last_page_lookup; if (unlikely (i >= page_map.length || page_map_array[i].major != major))
{
page_map.bfind (major, &i, HB_NOT_FOUND_STORE_CLOSEST); if (i >= page_map.length) return 0; // codepoint is greater than our max element.
}
start_page = i;
start_page_value = page_remainder (codepoint + 1); if (unlikely (start_page_value == 0))
{ // The export-after value was last in the page. Start on next page.
start_page++;
start_page_value = 0;
}
}
unsignedint initial_size = size; for (unsignedint i = start_page; i < page_map.length && size; i++)
{
uint32_t base = major_start (page_map[i].major); unsignedint n = pages[page_map[i].index].write (base, start_page_value, out, size);
out += n;
size -= n;
start_page_value = 0;
} return initial_size - size;
}
unsignedint next_many_inverted (hb_codepoint_t codepoint,
hb_codepoint_t *out, unsignedint size) const
{ unsignedint initial_size = size; // By default, start at the first bit of the first page of values. unsignedint start_page = 0; unsignedint start_page_value = 0; if (unlikely (codepoint != INVALID))
{ constauto* page_map_array = page_map.arrayZ; unsignedint major = get_major (codepoint); unsignedint i = last_page_lookup; if (unlikely (i >= page_map.length || page_map_array[i].major != major))
{
page_map.bfind(major, &i, HB_NOT_FOUND_STORE_CLOSEST); if (unlikely (i >= page_map.length))
{ // codepoint is greater than our max element. while (++codepoint != INVALID && size)
{
*out++ = codepoint;
size--;
} return initial_size - size;
}
}
start_page = i;
start_page_value = page_remainder (codepoint + 1); if (unlikely (start_page_value == 0))
{ // The export-after value was last in the page. Start on next page.
start_page++;
start_page_value = 0;
}
}
/* The extra page_map length is necessary; can't just rely on vector here, * since the next check would be tricked because a null page also has
* major==0, which we can't distinguish from an actually major==0 page... */ unsigned i = last_page_lookup; if (likely (i < page_map.length))
{ auto &cached_page = page_map.arrayZ[i]; if (cached_page.major == major) return &pages.arrayZ[cached_page.index];
}
page_map_t map = {major, pages.length}; if (!page_map.bfind (map, &i, HB_NOT_FOUND_STORE_CLOSEST))
{ if (!insert) return nullptr;
if (unlikely (!resize (pages.length + 1))) return nullptr;
/* The extra page_map length is necessary; can't just rely on vector here, * since the next check would be tricked because a null page also has
* major==0, which we can't distinguish from an actually major==0 page... */ unsigned i = last_page_lookup; if (likely (i < page_map.length))
{ auto &cached_page = page_map.arrayZ[i]; if (cached_page.major == major) return &pages.arrayZ[cached_page.index];
}
page_map_t key = {major}; if (!page_map.bfind (key, &i)) return nullptr;
/* NOTE(review): The following text is extraction residue — German website
 * boilerplate unrelated to this source file — preserved here (translated)
 * as a comment so the file stays parseable:
 * "The information on this web page has been carefully compiled to the best
 * of our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental." */