// Copyright (c) the JPEG XL Project Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Initializes `self` from the caller-supplied `memory_manager`, or zeroes it
// when `memory_manager` is null, then installs the library's default
// alloc/free callbacks for any slot the caller left unset.
//
// Returns an error (false) when exactly one of alloc/free is customized:
// the two callbacks must be overridden together, otherwise a custom alloc
// could be paired with the default free (or vice versa) and corrupt memory.
Status MemoryManagerInit(JxlMemoryManager* self,
                         const JxlMemoryManager* memory_manager) {
  if (memory_manager) {
    *self = *memory_manager;
  } else {
    memset(self, 0, sizeof(*self));
  }
  const bool is_default_alloc = (self->alloc == nullptr);
  const bool is_default_free = (self->free == nullptr);
  // Reject a half-customized pair: both callbacks or neither.
  if (is_default_alloc != is_default_free) {
    return false;  // was `returnfalse;` — missing space, did not compile
  }
  if (is_default_alloc) self->alloc = jxl::MemoryManagerDefaultAlloc;
  if (is_default_free) self->free = jxl::MemoryManagerDefaultFree;
  return true;  // was `returntrue;` — missing space, did not compile
}
size_t BytesPerRow(const size_t xsize, const size_t sizeof_t) { // Special case: we don't allow any ops -> don't need extra padding/ if (xsize == 0) { return 0;
}
// Allow unaligned accesses starting at the last valid value. // Skip for the scalar case because no extra lanes will be loaded. if (vec_size != 0) {
valid_bytes += vec_size - sizeof_t;
}
// Round up to vector and cache line size. const size_t align = std::max(vec_size, memory_manager_internal::kAlignment);
size_t bytes_per_row = RoundUpTo(valid_bytes, align);
// During the lengthy window before writes are committed to memory, CPUs // guard against read after write hazards by checking the address, but // only the lower 11 bits. We avoid a false dependency between writes to // consecutive rows by ensuring their sizes are not multiples of 2 KiB. // Avoid2K prevents the same problem for the planes of an Image3. if (bytes_per_row % memory_manager_internal::kAlias == 0) {
bytes_per_row += align;
}
// Computes an address within `allocation` (after skipping `pre_padding`
// bytes) that is kAlias-aligned plus a per-instance group offset. The group
// index rotates through kNumAlignmentGroups via an atomic counter so that
// successive allocations land at different alignments modulo kAlias.
AlignedMemory::AlignedMemory(JxlMemoryManager* memory_manager, void* allocation,
                             size_t pre_padding)
    : allocation_(allocation), memory_manager_(memory_manager) {
  // Congruence to `offset` (mod kAlias) reduces cache conflicts and load/store
  // stalls, especially with large allocations that would otherwise have
  // similar alignments.
  static std::atomic<uint32_t> next_group{0};
  // Relaxed ordering suffices: the counter only spreads alignment groups;
  // no other data is published through it.
  size_t group =
      static_cast<size_t>(next_group.fetch_add(1, std::memory_order_relaxed));
  group &= (memory_manager_internal::kNumAlignmentGroups - 1);
  size_t offset = memory_manager_internal::kAlignment * group;
  // Actual allocation.
  uintptr_t address = reinterpret_cast<uintptr_t>(allocation) + pre_padding;
  // Aligned address, but might land before allocation (50%/50%) or not have
  // enough pre-padding.
  uintptr_t aligned_address =
      (address & ~(memory_manager_internal::kAlias - 1)) + offset;
  // If rounding down moved us before the usable region, advance one kAlias
  // stride to get back inside it.
  if (aligned_address < address)
    aligned_address += memory_manager_internal::kAlias;
  // NOTE(review): this definition appears truncated in this chunk — the final
  // store of `aligned_address` into the address member and the closing brace
  // are not visible here; confirm against the full file.
// NOTE(review): the following text appears to be unrelated website residue
// (a German disclaimer) accidentally appended to this source file; preserved
// here, translated to English, pending removal:
// "The information on this website has been carefully compiled to the best
// of our knowledge. However, neither completeness, nor correctness, nor
// quality of the provided information is guaranteed.
// Remark: the colored syntax highlighting and the measurement are still
// experimental."