/*
Formatting library for C++
Copyright (c) 2012 - present, Victor Zverovich
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--- Optional exception to the license ---
As an exception, if, as a result of your compiling your source code, portions
of this Software are embedded into a machine-executable object form of such
source code, you may redistribute such embedded portions in such object form
without including the above copyright and permission notices.
*/
#ifndef FMT_FORMAT_H_
#define FMT_FORMAT_H_

#ifndef _LIBCPP_REMOVE_TRANSITIVE_INCLUDES
#  define _LIBCPP_REMOVE_TRANSITIVE_INCLUDES
#  define FMT_REMOVE_TRANSITIVE_INCLUDES
#endif

#include "base.h"

#ifndef FMT_MODULE
#  include <cmath>             // std::signbit
#  include <cstddef>           // std::byte
#  include <cstdint>           // uint32_t
#  include <cstring>           // std::memcpy
#  include <initializer_list>  // std::initializer_list
#  include <limits>            // std::numeric_limits
#  include <new>               // std::bad_alloc
#  if defined(__GLIBCXX__) && !defined(_GLIBCXX_USE_DUAL_ABI)
// Workaround for pre gcc 5 libstdc++.
#    include <memory>  // std::allocator_traits
#  endif
#  include <stdexcept>     // std::runtime_error
#  include <string>        // std::string
#  include <system_error>  // std::system_error

// Checking FMT_CPLUSPLUS for warning suppression in MSVC.
#  if FMT_HAS_INCLUDE(<bit>) && FMT_CPLUSPLUS > 201703L
#    include <bit>  // std::bit_cast
#  endif

// libc++ supports string_view in pre-c++17.
#  if FMT_HAS_INCLUDE(<string_view>) && \
      (FMT_CPLUSPLUS >= 201703L || defined(_LIBCPP_VERSION))
#    include <string_view>
#    define FMT_USE_STRING_VIEW
#  endif

#  if FMT_MSC_VERSION
#    include <intrin.h>  // _BitScanReverse[64], _BitScanForward[64], _umul128
#  endif
#endif  // FMT_MODULE

#if defined(FMT_USE_NONTYPE_TEMPLATE_ARGS)
// Use the provided definition.
#elif defined(__NVCOMPILER)
#  define FMT_USE_NONTYPE_TEMPLATE_ARGS 0
#elif FMT_GCC_VERSION >= 903 && FMT_CPLUSPLUS >= 201709L
#  define FMT_USE_NONTYPE_TEMPLATE_ARGS 1
#elif defined(__cpp_nontype_template_args) && \
    __cpp_nontype_template_args >= 201911L
#  define FMT_USE_NONTYPE_TEMPLATE_ARGS 1
#elif FMT_CLANG_VERSION >= 1200 && FMT_CPLUSPLUS >= 202002L
#  define FMT_USE_NONTYPE_TEMPLATE_ARGS 1
#else
#  define FMT_USE_NONTYPE_TEMPLATE_ARGS 0
#endif

#if defined __cpp_inline_variables && __cpp_inline_variables >= 201606L
#  define FMT_INLINE_VARIABLE inline
#else
#  define FMT_INLINE_VARIABLE
#endif

// Check if RTTI is disabled.
#ifdef FMT_USE_RTTI
// Use the provided definition.
#elif defined(__GXX_RTTI) || FMT_HAS_FEATURE(cxx_rtti) || defined(_CPPRTTI) || \
    defined(__INTEL_RTTI__) || defined(__RTTI)
// __RTTI is for EDG compilers. _CPPRTTI is for MSVC.
#  define FMT_USE_RTTI 1
#else
#  define FMT_USE_RTTI 0
#endif

// Visibility when compiled as a shared library/object.
#if defined(FMT_LIB_EXPORT) || defined(FMT_SHARED)
#  define FMT_SO_VISIBILITY(value) FMT_VISIBILITY(value)
#else
#  define FMT_SO_VISIBILITY(value)
#endif

#if FMT_GCC_VERSION || FMT_CLANG_VERSION
#  define FMT_NOINLINE __attribute__((noinline))
#else
#  define FMT_NOINLINE
#endif
// Specialize std::iterator_traits so that fmt's appender can be used with
// standard algorithms. basic_appender exposes no element access, hence the
// void pointer and reference types.
namespace std {
template <typename T> struct iterator_traits<fmt::basic_appender<T>> {
  using iterator_category = output_iterator_tag;
  using value_type = T;
  // Pointer difference type (i.e. std::ptrdiff_t spelled without <cstddef>).
  using difference_type =
      decltype(static_cast<int*>(nullptr) - static_cast<int*>(nullptr));
  using pointer = void;
  using reference = void;
};
}  // namespace std
#ifndef FMT_THROW
#  if FMT_USE_EXCEPTIONS
#    if FMT_MSC_VERSION || defined(__NVCC__)
FMT_BEGIN_NAMESPACE
namespace detail {
// Throws x. Silences unreachable code warnings in MSVC and NVCC because these
// are nearly impossible to fix in a generic code.
template <typename Exception> inline void do_throw(const Exception& x) {
  volatile bool b = true;
  if (b) throw x;
}
}  // namespace detail
FMT_END_NAMESPACE
#      define FMT_THROW(x) detail::do_throw(x)
#    else
#      define FMT_THROW(x) throw x
#    endif
#  else
#    define FMT_THROW(x) \
      ::fmt::detail::assert_fail(__FILE__, __LINE__, (x).what())
#  endif
#endif
#ifdef FMT_NO_UNIQUE_ADDRESS
// Use the provided definition.
#elif FMT_CPLUSPLUS < 202002L
// Not supported.
#elif FMT_HAS_CPP_ATTRIBUTE(no_unique_address)
#  define FMT_NO_UNIQUE_ADDRESS [[no_unique_address]]
// VS2019 v16.10 and later except clang-cl (https://reviews.llvm.org/D110485).
#elif FMT_MSC_VERSION >= 1929 && !FMT_CLANG_VERSION
#  define FMT_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]]
#endif
#ifndef FMT_NO_UNIQUE_ADDRESS
#  define FMT_NO_UNIQUE_ADDRESS
#endif

// Defining FMT_REDUCE_INT_INSTANTIATIONS to 1, will reduce the number of
// integer formatter template instantiations to just one by only using the
// largest integer type. This results in a reduction in binary size but will
// cause a decrease in integer formatting performance.
#if !defined(FMT_REDUCE_INT_INSTANTIATIONS)
#  define FMT_REDUCE_INT_INSTANTIATIONS 0
#endif

// __builtin_clz is broken in clang with Microsoft codegen:
// https://github.com/fmtlib/fmt/issues/519.
#if !FMT_MSC_VERSION
#  if FMT_HAS_BUILTIN(__builtin_clz) || FMT_GCC_VERSION || FMT_ICC_VERSION
#    define FMT_BUILTIN_CLZ(n) __builtin_clz(n)
#  endif
#  if FMT_HAS_BUILTIN(__builtin_clzll) || FMT_GCC_VERSION || FMT_ICC_VERSION
#    define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n)
#  endif
#endif

// __builtin_ctz is broken in Intel Compiler Classic on Windows:
// https://github.com/fmtlib/fmt/issues/2510.
#ifndef __ICL
#  if FMT_HAS_BUILTIN(__builtin_ctz) || FMT_GCC_VERSION || FMT_ICC_VERSION || \
      defined(__NVCOMPILER)
#    define FMT_BUILTIN_CTZ(n) __builtin_ctz(n)
#  endif
#  if FMT_HAS_BUILTIN(__builtin_ctzll) || FMT_GCC_VERSION || \
      FMT_ICC_VERSION || defined(__NVCOMPILER)
#    define FMT_BUILTIN_CTZLL(n) __builtin_ctzll(n)
#  endif
#endif
// Some compilers masquerade as both MSVC and GCC-likes or otherwise support
// __builtin_clz and __builtin_clzll, so only define FMT_BUILTIN_CLZ using the
// MSVC intrinsics if the clz and clzll builtins are not available.
#if FMT_MSC_VERSION && !defined(FMT_BUILTIN_CLZLL) && \
    !defined(FMT_BUILTIN_CTZLL)
FMT_BEGIN_NAMESPACE
namespace detail {
// Avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning.
#  if !defined(__clang__)
#    pragma intrinsic(_BitScanForward)
#    pragma intrinsic(_BitScanReverse)
#    if defined(_WIN64)
#      pragma intrinsic(_BitScanForward64)
#      pragma intrinsic(_BitScanReverse64)
#    endif
#  endif

// Returns the number of leading zero bits in x; x must be nonzero.
inline auto clz(uint32_t x) -> int {
  unsigned long r = 0;
  _BitScanReverse(&r, x);
  FMT_ASSERT(x != 0, "");
  // Static analysis complains about using uninitialized data
  // "r", but the only way that can happen is if "x" is 0,
  // which the callers guarantee to not happen.
  FMT_MSC_WARNING(suppress : 6102)
  return 31 ^ static_cast<int>(r);
}
#  define FMT_BUILTIN_CLZ(n) detail::clz(n)

// 64-bit variant of clz; on 32-bit Windows the two halves are scanned
// separately since _BitScanReverse64 is unavailable.
inline auto clzll(uint64_t x) -> int {
  unsigned long r = 0;
#  ifdef _WIN64
  _BitScanReverse64(&r, x);
#  else
  // Scan the high 32 bits.
  if (_BitScanReverse(&r, static_cast<uint32_t>(x >> 32)))
    return 63 ^ static_cast<int>(r + 32);
  // Scan the low 32 bits.
  _BitScanReverse(&r, static_cast<uint32_t>(x));
#  endif
  FMT_ASSERT(x != 0, "");
  FMT_MSC_WARNING(suppress : 6102)  // Suppress a bogus static analysis warning.
  return 63 ^ static_cast<int>(r);
}
#  define FMT_BUILTIN_CLZLL(n) detail::clzll(n)

// Returns the number of trailing zero bits in x; x must be nonzero.
inline auto ctz(uint32_t x) -> int {
  unsigned long r = 0;
  _BitScanForward(&r, x);
  FMT_ASSERT(x != 0, "");
  FMT_MSC_WARNING(suppress : 6102)  // Suppress a bogus static analysis warning.
  return static_cast<int>(r);
}
#  define FMT_BUILTIN_CTZ(n) detail::ctz(n)

// 64-bit variant of ctz; see clzll for the 32-bit Windows strategy.
inline auto ctzll(uint64_t x) -> int {
  unsigned long r = 0;
  FMT_ASSERT(x != 0, "");
  FMT_MSC_WARNING(suppress : 6102)  // Suppress a bogus static analysis warning.
#  ifdef _WIN64
  _BitScanForward64(&r, x);
#  else
  // Scan the low 32 bits.
  if (_BitScanForward(&r, static_cast<uint32_t>(x))) return static_cast<int>(r);
  // Scan the high 32 bits.
  _BitScanForward(&r, static_cast<uint32_t>(x >> 32));
  r += 32;
#  endif
  return static_cast<int>(r);
}
#  define FMT_BUILTIN_CTZLL(n) detail::ctzll(n)
}  // namespace detail
FMT_END_NAMESPACE
#endif
FMT_BEGIN_NAMESPACE
// std::basic_string stores its elements contiguously, so fmt may write into
// it directly.
template <typename Char, typename Traits, typename Allocator>
struct is_contiguous<std::basic_string<Char, Traits, Allocator>>
    : std::true_type {};
namespace detail {
// Stops a fuzzing run early (by throwing) when a size limit is exceeded so
// that fuzzers do not waste time on pathological inputs. A no-op unless
// FMT_FUZZ is defined.
FMT_CONSTEXPR inline void abort_fuzzing_if(bool condition) {
  ignore_unused(condition);
#ifdef FMT_FUZZ
  if (condition) throw std::runtime_error("fuzzing limit reached");
#endif
}
#if defined(FMT_USE_STRING_VIEW)
// Alias for std::basic_string_view when the standard library provides it.
template <typename Char> using std_string_view = std::basic_string_view<Char>;
#else
// Empty placeholder used when std::string_view is unavailable.
template <typename T> struct std_string_view {};
#endif
// A compile-time string represented as a character pack, convertible to
// basic_string_view. The value array is not null-terminated.
template <typename Char, Char... C> struct string_literal {
  static constexpr Char value[sizeof...(C)] = {C...};
  constexpr operator basic_string_view<Char>() const {
    return {value, sizeof...(C)};
  }
};
#if FMT_CPLUSPLUS < 201703L
// Out-of-class definition, required for static constexpr data members before
// C++17 made them implicitly inline.
template <typename Char, Char... C>
constexpr Char string_literal<Char, C...>::value[sizeof...(C)];
#endif
// Implementation of std::bit_cast for pre-C++20.
template <typename To, typename From,
          FMT_ENABLE_IF(sizeof(To) == sizeof(From))>
FMT_CONSTEXPR20 auto bit_cast(const From& from) -> To {
#ifdef __cpp_lib_bit_cast
  // memcpy is not constexpr, so delegate to the real bit_cast when evaluated
  // at compile time.
  if (is_constant_evaluated()) return std::bit_cast<To>(from);
#endif
  auto to = To();
  // The cast suppresses a bogus -Wclass-memaccess on GCC.
  std::memcpy(static_cast<void*>(&to), &from, sizeof(to));
  return to;
}
// Reports whether the execution environment is big-endian. Uses compile-time
// macros where available and falls back to inspecting the byte layout of an
// int at runtime.
inline auto is_big_endian() -> bool {
#ifdef _WIN32
  return false;
#elif defined(__BIG_ENDIAN__)
  return true;
#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__)
  return __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__;
#else
  struct bytes {
    char data[sizeof(int)];
  };
  return bit_cast<bytes>(1).data[0] == 0;
#endif
}
// A portable 128-bit unsigned integer used when the compiler has no native
// __int128. Stores the value as two 64-bit halves.
class uint128_fallback {
 private:
  uint64_t lo_, hi_;

 public:
  constexpr uint128_fallback(uint64_t hi, uint64_t lo) : lo_(lo), hi_(hi) {}
  constexpr uint128_fallback(uint64_t value = 0) : lo_(value), hi_(0) {}

  constexpr auto high() const noexcept -> uint64_t { return hi_; }
  constexpr auto low() const noexcept -> uint64_t { return lo_; }

  // Truncating conversion to a builtin integral type (keeps the low half).
  template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>
  constexpr explicit operator T() const {
    return static_cast<T>(lo_);
  }

  friend constexpr auto operator==(const uint128_fallback& lhs,
                                   const uint128_fallback& rhs) -> bool {
    return lhs.hi_ == rhs.hi_ && lhs.lo_ == rhs.lo_;
  }
  friend constexpr auto operator!=(const uint128_fallback& lhs,
                                   const uint128_fallback& rhs) -> bool {
    return !(lhs == rhs);
  }
  friend constexpr auto operator>(const uint128_fallback& lhs,
                                  const uint128_fallback& rhs) -> bool {
    return lhs.hi_ != rhs.hi_ ? lhs.hi_ > rhs.hi_ : lhs.lo_ > rhs.lo_;
  }
  friend constexpr auto operator|(const uint128_fallback& lhs,
                                  const uint128_fallback& rhs)
      -> uint128_fallback {
    return {lhs.hi_ | rhs.hi_, lhs.lo_ | rhs.lo_};
  }
  friend constexpr auto operator&(const uint128_fallback& lhs,
                                  const uint128_fallback& rhs)
      -> uint128_fallback {
    return {lhs.hi_ & rhs.hi_, lhs.lo_ & rhs.lo_};
  }
  friend constexpr auto operator~(const uint128_fallback& n)
      -> uint128_fallback {
    return {~n.hi_, ~n.lo_};
  }
  friend FMT_CONSTEXPR auto operator+(const uint128_fallback& lhs,
                                      const uint128_fallback& rhs)
      -> uint128_fallback {
    auto result = uint128_fallback(lhs);
    result += rhs;
    return result;
  }
  // Multiplication by a 32-bit value; requires the high half of lhs to be 0.
  friend FMT_CONSTEXPR auto operator*(const uint128_fallback& lhs, uint32_t rhs)
      -> uint128_fallback {
    FMT_ASSERT(lhs.hi_ == 0, "");
    uint64_t hi = (lhs.lo_ >> 32) * rhs;
    uint64_t lo = (lhs.lo_ & ~uint32_t()) * rhs;
    uint64_t new_lo = (hi << 32) + lo;
    return {(hi >> 32) + (new_lo < lo ? 1 : 0), new_lo};
  }
  friend constexpr auto operator-(const uint128_fallback& lhs, uint64_t rhs)
      -> uint128_fallback {
    return {lhs.hi_ - (lhs.lo_ < rhs ? 1 : 0), lhs.lo_ - rhs};
  }
  FMT_CONSTEXPR auto operator>>(int shift) const -> uint128_fallback {
    if (shift == 64) return {0, hi_};
    if (shift > 64) return uint128_fallback(0, hi_) >> (shift - 64);
    return {hi_ >> shift, (hi_ << (64 - shift)) | (lo_ >> shift)};
  }
  FMT_CONSTEXPR auto operator<<(int shift) const -> uint128_fallback {
    if (shift == 64) return {lo_, 0};
    if (shift > 64) return uint128_fallback(lo_, 0) << (shift - 64);
    return {hi_ << shift | (lo_ >> (64 - shift)), (lo_ << shift)};
  }
  FMT_CONSTEXPR auto operator>>=(int shift) -> uint128_fallback& {
    return *this = *this >> shift;
  }
  FMT_CONSTEXPR void operator+=(uint128_fallback n) {
    uint64_t new_lo = lo_ + n.lo_;
    uint64_t new_hi = hi_ + n.hi_ + (new_lo < lo_ ? 1 : 0);
    FMT_ASSERT(new_hi >= hi_, "");
    lo_ = new_lo;
    hi_ = new_hi;
  }
  FMT_CONSTEXPR void operator&=(uint128_fallback n) {
    lo_ &= n.lo_;
    hi_ &= n.hi_;
  }

  // Adds a 64-bit value, using an add-with-carry intrinsic where one is
  // available at runtime.
  FMT_CONSTEXPR20 auto operator+=(uint64_t n) noexcept -> uint128_fallback& {
    if (is_constant_evaluated()) {
      lo_ += n;
      hi_ += (lo_ < n ? 1 : 0);
      return *this;
    }
#if FMT_HAS_BUILTIN(__builtin_addcll) && !defined(__ibmxl__)
    unsigned long long carry;
    lo_ = __builtin_addcll(lo_, n, 0, &carry);
    hi_ += carry;
#elif FMT_HAS_BUILTIN(__builtin_ia32_addcarryx_u64) && !defined(__ibmxl__)
    unsigned long long result;
    auto carry = __builtin_ia32_addcarryx_u64(0, lo_, n, &result);
    lo_ = result;
    hi_ += carry;
#elif defined(_MSC_VER) && defined(_M_X64)
    auto carry = _addcarry_u64(0, lo_, n, &lo_);
    _addcarry_u64(carry, hi_, 0, &hi_);
#else
    lo_ += n;
    hi_ += (lo_ < n ? 1 : 0);
#endif
    return *this;
  }
};
using uint128_t = conditional_t<FMT_USE_INT128, uint128_opt, uint128_fallback>;

#ifdef UINTPTR_MAX
using uintptr_t = ::uintptr_t;
#else
using uintptr_t = uint128_t;
#endif

// Returns the largest possible value for type T. Same as
// std::numeric_limits<T>::max() but shorter and not affected by the max macro.
template <typename T> constexpr auto max_value() -> T {
  return (std::numeric_limits<T>::max)();
}

// Returns the number of significant bits in T.
template <typename T> constexpr auto num_bits() -> int {
  return std::numeric_limits<T>::digits;
}
// std::numeric_limits<T>::digits may return 0 for 128-bit ints.
template <> constexpr auto num_bits<int128_opt>() -> int { return 128; }
template <> constexpr auto num_bits<uint128_opt>() -> int { return 128; }
template <> constexpr auto num_bits<uint128_fallback>() -> int { return 128; }
// A heterogeneous bit_cast used for converting 96-bit long double to uint128_t
// and 128-bit pointers to uint128_fallback.
template <typename To, typename From,
          FMT_ENABLE_IF(sizeof(To) > sizeof(From))>
inline auto bit_cast(const From& from) -> To {
  constexpr auto size = static_cast<int>(sizeof(From) / sizeof(unsigned));
  struct data_t {
    unsigned value[static_cast<unsigned>(size)];
  } data = bit_cast<data_t>(from);
  auto result = To();
  // Fold the source words into the result most-significant-word first,
  // picking the word order that matches the host byte order.
  if (const_check(is_big_endian())) {
    for (int i = 0; i < size; ++i)
      result = (result << num_bits<unsigned>()) | data.value[i];
  } else {
    for (int i = size - 1; i >= 0; --i)
      result = (result << num_bits<unsigned>()) | data.value[i];
  }
  return result;
}
// Portable leading-zero count used when no builtin is available or in
// constant-evaluated contexts; n must be nonzero.
template <typename UInt>
FMT_CONSTEXPR20 inline auto countl_zero_fallback(UInt n) -> int {
  int lz = 0;
  constexpr UInt msb_mask = static_cast<UInt>(1) << (num_bits<UInt>() - 1);
  for (; (n & msb_mask) == 0; n <<= 1) lz++;
  return lz;
}

FMT_CONSTEXPR20 inline auto countl_zero(uint32_t n) -> int {
#ifdef FMT_BUILTIN_CLZ
  if (!is_constant_evaluated()) return FMT_BUILTIN_CLZ(n);
#endif
  return countl_zero_fallback(n);
}

FMT_CONSTEXPR20 inline auto countl_zero(uint64_t n) -> int {
#ifdef FMT_BUILTIN_CLZLL
  if (!is_constant_evaluated()) return FMT_BUILTIN_CLZLL(n);
#endif
  return countl_zero_fallback(n);
}
// Tells the optimizer that condition always holds; no effect on compilers
// without a suitable builtin.
FMT_INLINE void assume(bool condition) {
  (void)condition;  // Avoid an unused-parameter warning on the no-hint path.
#if FMT_HAS_BUILTIN(__builtin_assume) && !FMT_ICC_VERSION
  __builtin_assume(condition);
#elif FMT_GCC_VERSION
  if (!condition) __builtin_unreachable();
#endif
}
// Attempts to reserve space for n extra characters in the output range.
// Returns a pointer to the reserved range or a reference to it.
template <typename OutputIt,
          FMT_ENABLE_IF(is_back_insert_iterator<OutputIt>::value&&
                            is_contiguous<typename OutputIt::container>::value)>
#if FMT_CLANG_VERSION >= 307 && !FMT_ICC_VERSION
__attribute__((no_sanitize("undefined")))
#endif
FMT_CONSTEXPR20 inline auto reserve(OutputIt it, size_t n) ->
    typename OutputIt::value_type* {
  auto& c = get_container(it);
  size_t size = c.size();
  c.resize(size + n);
  return &c[size];
}

template <typename T>
FMT_CONSTEXPR20 inline auto reserve(basic_appender<T> it, size_t n)
    -> basic_appender<T> {
  buffer<T>& buf = get_container(it);
  buf.try_reserve(buf.size() + n);
  return it;
}

// Fallback: iterators without contiguous storage cannot reserve, so they are
// returned unchanged.
template <typename Iterator>
constexpr auto reserve(Iterator& it, size_t) -> Iterator& {
  return it;
}

template <typename OutputIt>
using reserve_iterator =
    remove_reference_t<decltype(reserve(std::declval<OutputIt&>(), 0))>;
// Returns a pointer to n contiguous reserved elements in the output, or
// nullptr when the output is not a growable buffer or cannot fit them.
template <typename T, typename OutputIt>
constexpr auto to_pointer(OutputIt, size_t) -> T* {
  return nullptr;
}
template <typename T>
FMT_CONSTEXPR20 auto to_pointer(basic_appender<T> it, size_t n) -> T* {
  buffer<T>& buf = get_container(it);
  auto size = buf.size();
  buf.try_reserve(size + n);
  if (buf.capacity() < size + n) return nullptr;
  buf.try_resize(size + n);
  return buf.data() + size;
}
// Converts the result of a fast (pointer-based) write path back to the
// iterator type the caller passed in.
template <typename OutputIt,
          FMT_ENABLE_IF(is_back_insert_iterator<OutputIt>::value&&
                            is_contiguous<typename OutputIt::container>::value)>
inline auto base_iterator(OutputIt it,
                          typename OutputIt::container_type::value_type*)
    -> OutputIt {
  return it;
}
template <typename Iterator>
constexpr auto base_iterator(Iterator, Iterator it) -> Iterator {
  return it;
}
// <algorithm> is spectacularly slow to compile in C++20 so use a simple fill_n
// instead (#1998).
template <typename OutputIt, typename Size, typename T>
FMT_CONSTEXPR auto fill_n(OutputIt out, Size count, const T& value)
    -> OutputIt {
  for (Size i = 0; i < count; ++i) *out++ = value;
  return out;
}
// Raw-pointer overload that uses memset at runtime.
template <typename T, typename Size>
FMT_CONSTEXPR20 auto fill_n(T* out, Size count, char value) -> T* {
  if (is_constant_evaluated()) return fill_n<T*, Size, T>(out, count, value);
  std::memset(out, value, to_unsigned(count));
  return out + count;
}
// A never-inlined wrapper around copy that reduces the code size of callers.
template <typename OutChar, typename InputIt, typename OutputIt>
FMT_CONSTEXPR FMT_NOINLINE auto copy_noinline(InputIt begin, InputIt end,
                                              OutputIt out) -> OutputIt {
  return copy<OutChar>(begin, end, out);
}
// A public domain branchless UTF-8 decoder by Christopher Wellons:
// https://github.com/skeeto/branchless-utf8
/* Decode the next character, c, from s, reporting errors in e.
 *
 * Since this is a branchless decoder, four bytes will be read from the
 * buffer regardless of the actual length of the next character. This
 * means the buffer _must_ have at least three bytes of zero padding
 * following the end of the data stream.
 *
 * Errors are reported in e, which will be non-zero if the parsed
 * character was somehow invalid: invalid byte sequence, non-canonical
 * encoding, or a surrogate half.
 *
 * The function returns a pointer to the next character. When an error
 * occurs, this pointer will be a guess that depends on the particular
 * error, but it will always advance at least one byte.
 */
FMT_CONSTEXPR inline auto utf8_decode(const char* s, uint32_t* c, int* e)
    -> const char* {
  constexpr const int masks[] = {0x00, 0x7f, 0x1f, 0x0f, 0x07};
  constexpr const uint32_t mins[] = {4194304, 0, 128, 2048, 65536};
  constexpr const int shiftc[] = {0, 18, 12, 6, 0};
  constexpr const int shifte[] = {0, 6, 4, 2, 0};

  // Sequence length (0 for an invalid lead byte) looked up by the top five
  // bits of the first byte.
  int len =
      "\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\0\0\0\0\0\0\0\2\2\2\2\3\3\4"
      [static_cast<unsigned char>(*s) >> 3];
  // Compute the pointer to the next character early so that the next
  // iteration can start working on the next character. Neither Clang
  // nor GCC figure out this reordering on their own.
  const char* next = s + len + !len;

  using uchar = unsigned char;

  // Assume a four-byte character and load four bytes. Unused bits are
  // shifted out.
  *c = uint32_t(uchar(s[0]) & masks[len]) << 18;
  *c |= uint32_t(uchar(s[1]) & 0x3f) << 12;
  *c |= uint32_t(uchar(s[2]) & 0x3f) << 6;
  *c |= uint32_t(uchar(s[3]) & 0x3f) << 0;
  *c >>= shiftc[len];

  // Accumulate the various error conditions.
  *e = (*c < mins[len]) << 6;       // non-canonical encoding
  *e |= ((*c >> 11) == 0x1b) << 7;  // surrogate half?
  *e |= (*c > 0x10FFFF) << 8;       // out of range?
  *e |= (uchar(s[1]) & 0xc0) >> 2;
  *e |= (uchar(s[2]) & 0xc0) >> 4;
  *e |= uchar(s[3]) >> 6;
  *e ^= 0x2a;  // top two bits of each tail byte correct?
  *e >>= shifte[len];

  return next;
}
constexpr FMT_INLINE_VARIABLE uint32_t invalid_code_point = ~uint32_t();

// Invokes f(cp, sv) for every code point cp in s with sv being the string view
// corresponding to the code point. cp is invalid_code_point on error.
template <typename F>
FMT_CONSTEXPR void for_each_codepoint(string_view s, F f) {
  // Decodes one code point starting at buf_ptr, where ptr marks the code
  // point's position in the original string. Returns the next read position
  // or nullptr if f asked to stop iterating.
  auto decode = [f](const char* buf_ptr, const char* ptr) {
    auto cp = uint32_t();
    auto error = 0;
    auto end = utf8_decode(buf_ptr, &cp, &error);
    bool result = f(error ? invalid_code_point : cp,
                    string_view(ptr, error ? 1 : to_unsigned(end - buf_ptr)));
    return result ? (error ? buf_ptr + 1 : end) : nullptr;
  };

  auto p = s.data();
  const size_t block_size = 4;  // utf8_decode always reads blocks of 4 chars.
  // Fast path: decode in place while at least 4 readable bytes remain.
  if (s.size() >= block_size) {
    for (auto end = p + s.size() - block_size + 1; p < end;) {
      p = decode(p, p);
      if (!p) return;
    }
  }
  auto num_chars_left = to_unsigned(s.data() + s.size() - p);
  if (num_chars_left == 0) return;

  // Tail: copy the remaining (< 4) chars into a zero-padded buffer so that
  // utf8_decode's unconditional 4-byte reads stay in bounds.
  FMT_ASSERT(num_chars_left < block_size, "");
  char buf[2 * block_size - 1] = {};
  copy<char>(p, p + num_chars_left, buf);
  const char* buf_ptr = buf;
  do {
    auto end = decode(buf_ptr, p);
    if (!end) return;
    p += end - buf_ptr;
    buf_ptr = end;
  } while (buf_ptr < buf + num_chars_left);
}
// For non-UTF-8 strings the display width is approximated by the number of
// code units.
template <typename Char>
inline auto compute_width(basic_string_view<Char> s) -> size_t {
  return s.size();
}
// Computes approximate display width of a UTF-8 string, counting wide
// (typically East Asian) code points as two columns and everything else,
// including decode errors, as one.
FMT_CONSTEXPR inline auto compute_width(string_view s) -> size_t {
  size_t num_code_points = 0;
  // It is not a lambda for compatibility with C++14.
  struct count_code_points {
    size_t* count;
    FMT_CONSTEXPR auto operator()(uint32_t cp, string_view) const -> bool {
      *count += to_unsigned(
          1 +
          (cp >= 0x1100 &&
           (cp <= 0x115f ||  // Hangul Jamo init. consonants
            cp == 0x2329 ||  // LEFT-POINTING ANGLE BRACKET
            cp == 0x232a ||  // RIGHT-POINTING ANGLE BRACKET
            // CJK ... Yi except IDEOGRAPHIC HALF FILL SPACE:
            (cp >= 0x2e80 && cp <= 0xa4cf && cp != 0x303f) ||
            (cp >= 0xac00 && cp <= 0xd7a3) ||    // Hangul Syllables
            (cp >= 0xf900 && cp <= 0xfaff) ||    // CJK Compatibility Ideographs
            (cp >= 0xfe10 && cp <= 0xfe19) ||    // Vertical Forms
            (cp >= 0xfe30 && cp <= 0xfe6f) ||    // CJK Compatibility Forms
            (cp >= 0xff00 && cp <= 0xff60) ||    // Fullwidth Forms
            (cp >= 0xffe0 && cp <= 0xffe6) ||    // Fullwidth Forms
            (cp >= 0x20000 && cp <= 0x2fffd) ||  // CJK
            (cp >= 0x30000 && cp <= 0x3fffd) ||
            // Miscellaneous Symbols and Pictographs + Emoticons:
            (cp >= 0x1f300 && cp <= 0x1f64f) ||
            // Supplemental Symbols and Pictographs:
            (cp >= 0x1f900 && cp <= 0x1f9ff))));
      return true;
    }
  };
  // We could avoid branches by using utf8_decode directly.
  for_each_codepoint(s, count_code_points{&num_code_points});
  return num_code_points;
}
// For non-UTF-8 strings the nth code point is simply the nth code unit,
// clamped to the string size.
template <typename Char>
inline auto code_point_index(basic_string_view<Char> s, size_t n) -> size_t {
  return min_of(n, s.size());
}

// Calculates the index of the nth code point in a UTF-8 string.
inline auto code_point_index(string_view s, size_t n) -> size_t {
  // Defaults to the full size when the string has fewer than n code points.
  size_t result = s.size();
  const char* begin = s.begin();
  for_each_codepoint(s, [begin, &n, &result](uint32_t, string_view sv) {
    if (n != 0) {
      --n;
      return true;  // Keep scanning.
    }
    result = to_unsigned(sv.begin() - begin);
    return false;  // Found the nth code point; stop.
  });
  return result;
}
// Integral traits extended to cover optional 128-bit integer types.
template <typename T> struct is_integral : std::is_integral<T> {};
template <> struct is_integral<int128_opt> : std::true_type {};
template <> struct is_integral<uint128_t> : std::true_type {};

template <typename T>
using is_signed =
    std::integral_constant<bool, std::numeric_limits<T>::is_signed ||
                                     std::is_same<T, int128_opt>::value>;

// Integer types that are formatted as numbers, i.e. excluding bool and
// character types.
template <typename T>
using is_integer =
    bool_constant<is_integral<T>::value && !std::is_same<T, bool>::value &&
                  !std::is_same<T, char>::value &&
                  !std::is_same<T, wchar_t>::value>;
#if defined(FMT_USE_FLOAT128)
// Use the provided definition.
#elif FMT_CLANG_VERSION && FMT_HAS_INCLUDE(<quadmath.h>)
#  define FMT_USE_FLOAT128 1
#elif FMT_GCC_VERSION && defined(_GLIBCXX_USE_FLOAT128) && \
    !defined(__STRICT_ANSI__)
#  define FMT_USE_FLOAT128 1
#else
#  define FMT_USE_FLOAT128 0
#endif
#if FMT_USE_FLOAT128
using float128 = __float128;
#else
// Empty placeholder used when __float128 is unavailable.
struct float128 {};
#endif
template <typename T> using is_float128 = std::is_same<T, float128>;

// Floating-point types, including __float128 when available.
template <typename T>
using is_floating_point =
    bool_constant<std::is_floating_point<T>::value || is_float128<T>::value>;

// IEEE 754 binary floating-point types no larger than double.
template <typename T, bool = std::is_floating_point<T>::value>
struct is_fast_float : bool_constant<std::numeric_limits<T>::is_iec559 &&
                                     sizeof(T) <= sizeof(double)> {};
template <typename T> struct is_fast_float<T, false> : std::false_type {};

// Detects double-double arithmetic by its 106-bit mantissa (e.g. PowerPC
// long double).
template <typename T>
using is_double_double = bool_constant<std::numeric_limits<T>::digits == 106>;

#ifndef FMT_USE_FULL_CACHE_DRAGONBOX
#  define FMT_USE_FULL_CACHE_DRAGONBOX 0
#endif
// An allocator that uses malloc/free to allow removing dependency on the C++
// standard libary runtime.
template <typename T> struct allocator {
  using value_type = T;

  // Allocates storage for n objects of type T. Throws std::bad_alloc when
  // malloc fails; asserts that n * sizeof(T) does not overflow.
  T* allocate(size_t n) {
    FMT_ASSERT(n <= max_value<size_t>() / sizeof(T), "");
    T* p = static_cast<T*>(malloc(n * sizeof(T)));
    if (!p) FMT_THROW(std::bad_alloc());
    return p;
  }

  void deallocate(T* p, size_t) { free(p); }
};
}
// namespace detail
FMT_BEGIN_EXPORT
// The number of characters to store in the basic_memory_buffer object itself
// to avoid dynamic memory allocation.
enum { inline_buffer_size = 500 };

/**
 * A dynamically growing memory buffer for trivially copyable/constructible
 * types with the first `SIZE` elements stored in the object itself. Most
 * commonly used via the `memory_buffer` alias for `char`.
 *
 * **Example**:
 *
 *     auto out = fmt::memory_buffer();
 *     fmt::format_to(std::back_inserter(out), "The answer is {}.", 42);
 *
 * This will append "The answer is 42." to `out`. The buffer content can be
 * converted to `std::string` with `to_string(out)`.
 */
template <typename T, size_t SIZE = inline_buffer_size,
          typename Allocator = detail::allocator<T>>
class basic_memory_buffer : public detail::buffer<T> {
 private:
  T store_[SIZE];

  // Don't inherit from Allocator to avoid generating type_info for it.
  FMT_NO_UNIQUE_ADDRESS Allocator alloc_;

  // Deallocate memory allocated by the buffer.
  FMT_CONSTEXPR20 void deallocate() {
    T* data = this->data();
    if (data != store_) alloc_.deallocate(data, this->capacity());
  }

  // Grows buf so it can hold at least size elements, copying the existing
  // contents into the new storage. Installed as the buffer's grow callback.
  static FMT_CONSTEXPR20 void grow(detail::buffer<T>& buf, size_t size) {
    detail::abort_fuzzing_if(size > 5000);
    auto& self = static_cast<basic_memory_buffer&>(buf);
    const size_t max_size =
        std::allocator_traits<Allocator>::max_size(self.alloc_);
    size_t old_capacity = buf.capacity();
    // Grow by a factor of 1.5, clamped to the allocator's limit.
    size_t new_capacity = old_capacity + old_capacity / 2;
    if (size > new_capacity)
      new_capacity = size;
    else if (new_capacity > max_size)
      new_capacity = max_of(size, max_size);
    T* old_data = buf.data();
    T* new_data = self.alloc_.allocate(new_capacity);
    // Suppress a bogus -Wstringop-overflow in gcc 13.1 (#3481).
    detail::assume(buf.size() <= new_capacity);
    // The following code doesn't throw, so the raw pointer above doesn't leak.
    memcpy(new_data, old_data, buf.size() * sizeof(T));
    self.set(new_data, new_capacity);
    // deallocate must not throw according to the standard, but even if it does,
    // the buffer already uses the new storage and will deallocate it in
    // destructor.
    if (old_data != self.store_) self.alloc_.deallocate(old_data, old_capacity);
  }

 public:
  using value_type = T;
  using const_reference = const T&;

  FMT_CONSTEXPR explicit basic_memory_buffer(
      const Allocator& alloc = Allocator())
      : detail::buffer<T>(grow), alloc_(alloc) {
    this->set(store_, SIZE);
    if (detail::is_constant_evaluated()) detail::fill_n(store_, SIZE, T());
  }
  FMT_CONSTEXPR20 ~basic_memory_buffer() { deallocate(); }

 private:
  // Move data from other to this buffer.
  FMT_CONSTEXPR20 void move(basic_memory_buffer& other) {
    alloc_ = std::move(other.alloc_);
    T* data = other.data();
    size_t size = other.size(), capacity = other.capacity();
    if (data == other.store_) {
      // The other buffer is inline: its elements must be copied.
      this->set(store_, capacity);
      detail::copy<T>(other.store_, other.store_ + size, store_);
    } else {
      // Steal the heap allocation.
      this->set(data, capacity);
      // Set pointer to the inline array so that delete is not called
      // when deallocating.
      other.set(other.store_, 0);
      other.clear();
    }
    this->resize(size);
  }

 public:
  /// Constructs a `basic_memory_buffer` object moving the content of the other
  /// object to it.
  FMT_CONSTEXPR20 basic_memory_buffer(basic_memory_buffer&& other) noexcept
      : detail::buffer<T>(grow) {
    move(other);
  }

  /// Moves the content of the other `basic_memory_buffer` object to this one.
  auto operator=(basic_memory_buffer&& other) noexcept -> basic_memory_buffer& {
    FMT_ASSERT(this != &other, "");
    deallocate();
    move(other);
    return *this;
  }

  // Returns a copy of the allocator associated with this buffer.
  auto get_allocator() const -> Allocator { return alloc_; }

  /// Resizes the buffer to contain `count` elements. If T is a POD type new
  /// elements may not be initialized.
  FMT_CONSTEXPR void resize(size_t count) { this->try_resize(count); }

  /// Increases the buffer capacity to `new_capacity`.
  void reserve(size_t new_capacity) { this->try_reserve(new_capacity); }

  using detail::buffer<T>::append;
  template <typename ContiguousRange>
  FMT_CONSTEXPR20 void append(const ContiguousRange& range) {
    append(range.data(), range.data() + range.size());
  }
};
using memory_buffer = basic_memory_buffer<char>;

// Copies the buffer content into a new std::string.
template <size_t SIZE>
FMT_NODISCARD auto to_string(basic_memory_buffer<char, SIZE>& buf)
    -> std::string {
  auto size = buf.size();
  detail::assume(size < std::string().max_size());
  return {buf.data(), size};
}
// A writer to a buffered stream. It doesn't own the underlying stream.
class writer {
 private:
  detail::buffer<char>* buf_;  // Non-null when writing to a buffer.

  // We cannot create a file buffer in advance because any write to a FILE may
  // invalidate it.
  FILE* file_;

 public:
  inline writer(FILE* f) : buf_(nullptr), file_(f) {}
  inline writer(detail::buffer<char>& buf) : buf_(&buf) {}

  /// Formats `args` according to specifications in `fmt` and writes the
  /// output to the file.
  template <typename... T> void print(format_string<T...> fmt, T&&... args) {
    if (buf_)
      fmt::format_to(appender(*buf_), fmt, std::forward<T>(args)...);
    else
      fmt::print(file_, fmt, std::forward<T>(args)...);
  }
};
// Owns a std::string and exposes it through the writer interface.
class string_buffer {
 private:
  std::string str_;
  detail::container_buffer<std::string> buf_;

 public:
  inline string_buffer() : buf_(str_) {}
  inline operator writer() { return buf_; }
  inline std::string& str() { return str_; }
};
// basic_memory_buffer stores its elements contiguously.
template <typename T, size_t SIZE, typename Allocator>
struct is_contiguous<basic_memory_buffer<T, SIZE, Allocator>> : std::true_type {
};
FMT_END_EXPORT
namespace detail {
FMT_API
auto write_console(
int fd, string_view text) ->
bool;
FMT_API
void print(FILE*, string_view);
}
// namespace detail
FMT_BEGIN_EXPORT
// Suppress a misleading warning in older versions of clang.
FMT_PRAGMA_CLANG(diagnostic ignored "-Wweak-vtables")

/// An error reported from a formatting function.
class FMT_SO_VISIBILITY("default") format_error : public std::runtime_error {
 public:
  using std::runtime_error::runtime_error;
};
namespace detail {
// A fixed-size array holding a copy of a string literal; usable where a
// compile-time string object is needed.
template <typename Char, size_t N> struct fixed_string {
  FMT_CONSTEXPR20 fixed_string(const Char (&s)[N]) {
    // Copies all N characters, including a trailing NUL if present.
    detail::copy<Char, const Char*, Char*>(static_cast<const Char*>(s), s + N,
                                           data);
  }
  Char data[N] = {};
};
// Converts a compile-time string to basic_string_view.
// The resulting length is N - 1 for string literals and N for plain arrays.
template <typename Char, size_t N>
constexpr auto compile_string_to_view(const Char (&s)[N])
    -> basic_string_view<Char> {
  // Remove trailing NUL character if needed. Won't be present if this is used
  // with a raw character array (i.e. not defined as a string).
  return {s, N - (std::char_traits<Char>::to_int_type(s[N - 1]) == 0 ? 1 : 0)};
}
// Overload for an existing view: returned unchanged.
template <typename Char>
constexpr auto compile_string_to_view(basic_string_view<Char> s)
    -> basic_string_view<Char> {
  return s;
}
}
// namespace detail
// A generic formatting context with custom output iterator and character
// (code unit) support. Char is the format string code unit type which can be
// different from OutputIt::value_type.
template <typename OutputIt, typename Char> class generic_context {
 private:
  OutputIt out_;
  basic_format_args<generic_context> args_;
  detail::locale_ref loc_;

 public:
  using char_type = Char;
  using iterator = OutputIt;
  using parse_context_type FMT_DEPRECATED = parse_context<Char>;
  template <typename T>
  using formatter_type FMT_DEPRECATED = formatter<T, Char>;
  enum { builtin_types = FMT_BUILTIN_TYPES };

  constexpr generic_context(OutputIt out,
                            basic_format_args<generic_context> args,
                            detail::locale_ref loc = {})
      : out_(out), args_(args), loc_(loc) {}
  generic_context(generic_context&&) = default;
  // Non-copyable: a context is passed by reference during formatting.
  generic_context(const generic_context&) = delete;
  void operator=(const generic_context&) = delete;

  // Returns the argument with the given positional id.
  constexpr auto arg(int id) const -> basic_format_arg<generic_context> {
    return args_.get(id);
  }
  // Returns the argument with the given name.
  auto arg(basic_string_view<Char> name) -> basic_format_arg<generic_context> {
    return args_.get(name);
  }
  // Returns the positional id of a named argument.
  FMT_CONSTEXPR auto arg_id(basic_string_view<Char> name) -> int {
    return args_.get_id(name);
  }

  // Returns the current output iterator.
  FMT_CONSTEXPR auto out() -> iterator { return out_; }

  // Advances the output iterator; a no-op for back-insert iterators, which
  // carry no position state.
  void advance_to(iterator it) {
    if (!detail::is_back_insert_iterator<iterator>()) out_ = it;
  }

  FMT_CONSTEXPR auto locale() -> detail::locale_ref { return loc_; }
};
// Holds a single formatting argument for locale-aware (grouped) formatting.
class loc_value {
 private:
  basic_format_arg<context> value_;

 public:
  template <typename T, FMT_ENABLE_IF(!detail::is_float128<T>::value)>
  loc_value(T value) : value_(value) {}

  // __float128 is not representable in basic_format_arg<context>; the value
  // is intentionally dropped.
  template <typename T, FMT_ENABLE_IF(detail::is_float128<T>::value)>
  loc_value(T) {}

  // Applies `vis` to the stored value.
  template <typename Visitor> auto visit(Visitor&& vis) -> decltype(vis(0)) {
    return value_.visit(vis);
  }
};
// A locale facet that formats values in UTF-8.
// It is parameterized on the locale to avoid the heavy <locale> include.
template <typename Locale> class format_facet : public Locale::facet {
 private:
  std::string separator_;      // thousands separator, e.g. ","
  std::string grouping_;       // digit group sizes, std::numpunct-style
  std::string decimal_point_;  // decimal point, e.g. "."

 protected:
  // Formats val into out according to specs. NOTE(review): implementation
  // and exact return semantics (success flag) live in format-inl.h.
  virtual auto do_put(appender out, loc_value val,
                      const format_specs& specs) const -> bool;

 public:
  static FMT_API typename Locale::id id;

  explicit format_facet(Locale& loc);
  explicit format_facet(string_view sep = "",
                        std::initializer_list<unsigned char> g = {3},
                        std::string decimal_point = ".")
      : separator_(sep.data(), sep.size()),
        grouping_(g.begin(), g.end()),
        decimal_point_(decimal_point) {}

  // Public entry point; dispatches to the do_put virtual.
  auto put(appender out, loc_value val, const format_specs& specs) const
      -> bool {
    return do_put(out, val, specs);
  }
};
FMT_END_EXPORT
namespace detail {
// Returns true if value is negative, false otherwise.
// Same as `value < 0` but doesn't produce warnings if T is an unsigned type.
template <typename T, FMT_ENABLE_IF(is_signed<T>::value)>
constexpr auto is_negative(T value) -> bool {
  return value < 0;
}
// Overload for unsigned types: always false, avoiding a tautological
// comparison warning.
template <typename T, FMT_ENABLE_IF(!is_signed<T>::value)>
constexpr auto is_negative(T) -> bool {
  return false;
}
// Smallest of uint32_t, uint64_t, uint128_t that is large enough to
// represent all values of an integral type T.
template <typename T>
using uint32_or_64_or_128_t =
    conditional_t<num_bits<T>() <= 32 && !FMT_REDUCE_INT_INSTANTIATIONS,
                  uint32_t,
                  conditional_t<num_bits<T>() <= 64, uint64_t, uint128_t>>;
// Same, but never narrower than 64 bits.
template <typename T>
using uint64_or_128_t = conditional_t<num_bits<T>() <= 64, uint64_t, uint128_t>;
// Expands to a comma-separated list factor*10, factor*100, ..., factor*10^9.
// Every occurrence of the argument is parenthesized so that expression
// arguments (e.g. `a + b`) expand correctly; the original left the first
// term unparenthesized.
#define FMT_POWERS_OF_10(factor)                                            \
  (factor) * 10, (factor) * 100, (factor) * 1000, (factor) * 10000,         \
      (factor) * 100000, (factor) * 1000000, (factor) * 10000000,           \
      (factor) * 100000000, (factor) * 1000000000
// Converts value in the range [0, 100) to a string.
// GCC generates slightly better code when value is pointer-size.
inline auto digits2(size_t value) -> const char* {
  // Align data since unaligned access may be slower when crossing a
  // hardware-specific boundary.
  alignas(2) static const char table[] =
      "0001020304050607080910111213141516171819"
      "2021222324252627282930313233343536373839"
      "4041424344454647484950515253545556575859"
      "6061626364656667686970717273747576777879"
      "8081828384858687888990919293949596979899";
  // Each two-character entry is the zero-padded decimal form of its index.
  return table + value * 2;
}
// Maps a sign enumerator to its display character. The three candidate
// characters are packed into one int ((' '<<24)|('+'<<16)|('-'<<8), low byte
// 0) and the shift selects byte number `s` — presumably matching the sign
// enumerator order none/minus/plus/space; confirm against the enum.
template <typename Char> constexpr auto getsign(sign s) -> Char {
  return static_cast<char>(((' ' << 24) | ('+' << 16) | ('-' << 8)) >>
                           (static_cast<int>(s) * 8));
}
// Counts decimal digits in n. Portable fallback used when no intrinsic-based
// implementation applies (and during constant evaluation).
template <typename T> FMT_CONSTEXPR auto count_digits_fallback(T n) -> int {
  int count = 1;
  for (;;) {
    // Integer division is slow so do it for a group of four digits instead
    // of for every digit. The idea comes from the talk by Alexandrescu
    // "Three Optimization Tips for C++". See speed-test for a comparison.
    if (n < 10) return count;
    if (n < 100) return count + 1;
    if (n < 1000) return count + 2;
    if (n < 10000) return count + 3;
    n /= 10000u;
    count += 4;
  }
}
#if FMT_USE_INT128
// 128-bit integers have no fast intrinsic path; use the generic fallback.
FMT_CONSTEXPR inline auto count_digits(uint128_opt n) -> int {
  return count_digits_fallback(n);
}
#endif
#ifdef FMT_BUILTIN_CLZLL
// It is a separate function rather than a part of count_digits to workaround
// the lack of static constexpr in constexpr functions.
inline auto do_count_digits(uint64_t n) -> int {
  // This has comparable performance to the version by Kendall Willets
  // (https://github.com/fmtlib/format-benchmark/blob/master/digits10)
  // but uses smaller tables.
  // Maps bsr(n) to ceil(log10(pow(2, bsr(n) + 1) - 1)).
  static constexpr uint8_t bsr2log10[] = {
      1,  1,  1,  2,  2,  2,  3,  3,  3,  4,  4,  4,  4,  5,  5,  5,
      6,  6,  6,  7,  7,  7,  7,  8,  8,  8,  9,  9,  9,  10, 10, 10,
      10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 15, 15,
      15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 20};
  // `n | 1` avoids the undefined clz(0); bsr(n) == 63 - clz(n) == clz(n) ^ 63.
  auto t = bsr2log10[FMT_BUILTIN_CLZLL(n | 1) ^ 63];
  static constexpr const uint64_t zero_or_powers_of_10[] = {
      0, 0, FMT_POWERS_OF_10(1U), FMT_POWERS_OF_10(1000000000ULL),
      10000000000000000000ULL};
  // Subtract one when n is below the smallest t-digit number.
  return t - (n < zero_or_powers_of_10[t]);
}
#endif
// Returns the number of decimal digits in n. Leading zeros are not counted
// except for n == 0 in which case count_digits returns 1.
FMT_CONSTEXPR20 inline auto count_digits(uint64_t n) -> int {
#ifdef FMT_BUILTIN_CLZLL
  // Use the table-based version at runtime unless optimizing for size.
  if (!is_constant_evaluated() && !FMT_OPTIMIZE_SIZE) return do_count_digits(n);
#endif
  return count_digits_fallback(n);
}
// Counts the number of digits in n. BITS = log2(radix).
template <int BITS, typename UInt>
FMT_CONSTEXPR auto count_digits(UInt n) -> int {
#ifdef FMT_BUILTIN_CLZ
  // For 32-bit operands: digit count is bit_width(n | 1) divided by BITS,
  // rounded up; (clz ^ 31) is the index of the highest set bit.
  if (!is_constant_evaluated() && num_bits<UInt>() == 32)
    return (FMT_BUILTIN_CLZ(static_cast<uint32_t>(n) | 1) ^ 31) / BITS + 1;
#endif
  // Lambda avoids unreachable code warnings from NVHPC.
  return [](UInt m) {
    int num_digits = 0;
    do {
      ++num_digits;
    } while ((m >>= BITS) != 0);
    return num_digits;
  }(n);
}
#ifdef FMT_BUILTIN_CLZ
// It is a separate function rather than a part of count_digits to workaround
// the lack of static constexpr in constexpr functions.
FMT_INLINE
auto do_count_digits(uint32_t n) ->
int {
// An optimization by Kendall Willets from https://bit.ly/3uOIQrB.
// This increments the upper 32 bits (log10(T) - 1) when >= T is added.
# define FMT_INC(T) (((
sizeof(
#T) - 1ull) << 32) - T)
static constexpr uint64_t table[] = {
FMT_INC(0), FMT_INC(0), FMT_INC(0),
// 8
FMT_INC(10), FMT_INC(10), FMT_INC(10),
// 64
FMT_INC(100), FMT_INC(100), FMT_INC(100),
// 512
FMT_INC(1000), FMT_INC(1000), FMT_INC(1000),
// 4096
FMT_INC(10000), FMT_INC(10000), FMT_INC(10000),
// 32k
FMT_INC(100000), FMT_INC(100000), FMT_INC(100000),
// 256k
FMT_INC(1000000), FMT_INC(1000000), FMT_INC(1000000),
// 2048k
FMT_INC(10000000), FMT_INC(10000000), FMT_INC(10000000),
// 16M
FMT_INC(100000000), FMT_INC(100000000), FMT_INC(100000000),
// 128M
FMT_INC(1000000000), FMT_INC(1000000000), FMT_INC(1000000000),
// 1024M
FMT_INC(1000000000), FMT_INC(1000000000)
// 4B
};
auto inc = table[FMT_BUILTIN_CLZ(n | 1) ^ 31];
return static_cast<
int>((n + inc) >> 32);
}
#endif
// Optional version of count_digits for better performance on 32-bit platforms.
FMT_CONSTEXPR20 inline auto count_digits(uint32_t n) -> int {
#ifdef FMT_BUILTIN_CLZ
  // Use the table-based version at runtime unless optimizing for size.
  if (!is_constant_evaluated() && !FMT_OPTIMIZE_SIZE) return do_count_digits(n);
#endif
  return count_digits_fallback(n);
}
// Returns the number of decimal digits representable without change in Int.
template <typename Int> constexpr auto digits10() noexcept -> int {
  return std::numeric_limits<Int>::digits10;
}
// std::numeric_limits is not specialized for 128-bit integers on all
// platforms, so the value (38) is hardcoded for them.
template <> constexpr auto digits10<int128_opt>() noexcept -> int { return 38; }
template <> constexpr auto digits10<uint128_t>() noexcept -> int { return 38; }
// Locale data for grouped integer formatting: numpunct-style grouping string
// plus the separator character.
template <typename Char> struct thousands_sep_result {
  std::string grouping;
  Char thousands_sep;
};

// Reads grouping data from the locale; defined in format-inl.h.
template <typename Char>
FMT_API auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result<Char>;

// Generic version: queries the narrow-char implementation and widens the
// separator to Char.
template <typename Char>
inline auto thousands_sep(locale_ref loc) -> thousands_sep_result<Char> {
  auto result = thousands_sep_impl<char>(loc);
  return {result.grouping, Char(result.thousands_sep)};
}
// wchar_t has its own explicit instantiation, so no widening is needed.
template <>
inline auto thousands_sep(locale_ref loc) -> thousands_sep_result<wchar_t> {
  return thousands_sep_impl<wchar_t>(loc);
}

// Reads the locale's decimal point; defined in format-inl.h.
template <typename Char>
FMT_API auto decimal_point_impl(locale_ref loc) -> Char;
template <typename Char> inline auto decimal_point(locale_ref loc) -> Char {
  return Char(decimal_point_impl<char>(loc));
}
template <> inline auto decimal_point(locale_ref loc) -> wchar_t {
  return decimal_point_impl<wchar_t>(loc);
}
#ifndef FMT_HEADER_ONLY
FMT_BEGIN_EXPORT
// Explicit instantiations are provided by the compiled library; suppress
// implicit instantiation in user translation units.
extern template FMT_API auto thousands_sep_impl<char>(locale_ref)
    -> thousands_sep_result<char>;
extern template FMT_API auto thousands_sep_impl<wchar_t>(locale_ref)
    -> thousands_sep_result<wchar_t>;
extern template FMT_API auto decimal_point_impl(locale_ref) -> char;
extern template FMT_API auto decimal_point_impl(locale_ref) -> wchar_t;
FMT_END_EXPORT
#endif  // FMT_HEADER_ONLY
// Compares the first two characters of lhs and rhs for equality, widening
// each rhs character to Char before comparing.
template <typename Char>
auto equal2(const Char* lhs, const char* rhs) -> bool {
  return Char(rhs[0]) == lhs[0] && Char(rhs[1]) == lhs[1];
}
// Narrow-char overload: compares the first two bytes of lhs and rhs.
// Both pointers must reference at least two valid bytes.
inline auto equal2(const char* lhs, const char* rhs) -> bool {
  return lhs[0] == rhs[0] && lhs[1] == rhs[1];
}
// Writes a two-digit value to out.
template <typename Char>
FMT_CONSTEXPR20 FMT_INLINE void write2digits(Char* out, size_t value) {
  // Runtime narrow-char fast path: copy both digits at once from the
  // digits2 lookup table.
  if (!is_constant_evaluated() && std::is_same<Char, char>::value &&
      !FMT_OPTIMIZE_SIZE) {
    memcpy(out, digits2(value), 2);
    return;
  }
  // Portable/constexpr path: compute each digit separately.
  *out++ = static_cast<Char>('0' + value / 10);
  *out = static_cast<Char>('0' + value % 10);
}
// Formats a decimal unsigned integer value writing to out pointing to a buffer
// of specified size. The caller must ensure that the buffer is large enough.
// Digits are produced right-to-left; returns a pointer to the first digit
// written (leading buffer positions are left untouched when size exceeds the
// digit count).
template <typename Char, typename UInt>
FMT_CONSTEXPR20 auto do_format_decimal(Char* out, UInt value, int size)
    -> Char* {
  FMT_ASSERT(size >= count_digits(value), "invalid digit count");
  unsigned n = to_unsigned(size);
  while (value >= 100) {
    // Integer division is slow so do it for a group of two digits instead
    // of for every digit. The idea comes from the talk by Alexandrescu
    // "Three Optimization Tips for C++". See speed-test for a comparison.
    n -= 2;
    write2digits(out + n, static_cast<unsigned>(value % 100));
    value /= 100;
  }
  // At most two digits remain.
  if (value >= 10) {
    n -= 2;
    write2digits(out + n, static_cast<unsigned>(value));
  } else {
    out[--n] = static_cast<Char>('0' + value);
  }
  return out + n;
}
// Formats value into [out, out + num_digits) and returns the end of the
// written range; num_digits must be at least count_digits(value).
template <typename Char, typename UInt>
FMT_CONSTEXPR FMT_INLINE auto format_decimal(Char* out, UInt value,
                                             int num_digits) -> Char* {
  do_format_decimal(out, value, num_digits);
  return out + num_digits;
}
// Back-insert-iterator overload: writes directly into the iterator's
// underlying contiguous buffer when possible, otherwise formats into a local
// buffer and copies.
template <typename Char, typename UInt, typename OutputIt,
          FMT_ENABLE_IF(is_back_insert_iterator<OutputIt>::value)>
FMT_CONSTEXPR auto format_decimal(OutputIt out, UInt value, int num_digits)
    -> OutputIt {
  if (auto ptr = to_pointer<Char>(out, to_unsigned(num_digits))) {
    do_format_decimal(ptr, value, num_digits);
    return out;
  }
  // Buffer is large enough to hold all digits (digits10 + 1).
  char buffer[digits10<UInt>() + 1];
  // Pre-fill so array reads are defined during constant evaluation.
  if (is_constant_evaluated()) fill_n(buffer, sizeof(buffer), '\0');
  do_format_decimal(buffer, value, num_digits);
  return copy_noinline<Char>(buffer, buffer + num_digits, out);
}
// Writes value in base pow(2, base_bits), right-to-left, ending at
// out + size; returns a pointer to the first digit written.
template <typename Char, typename UInt>
FMT_CONSTEXPR auto do_format_base2e(int base_bits, Char* out, UInt value,
                                    int size, bool upper = false) -> Char* {
  out += size;
  do {
    const char* digits = upper ? "0123456789ABCDEF" : "0123456789abcdef";
    // Extract the low base_bits bits as one digit.
    unsigned digit = static_cast<unsigned>(value & ((1 << base_bits) - 1));
    // Binary/octal digits are '0' + digit; hex needs the lookup table.
    *--out = static_cast<Char>(base_bits < 4 ? static_cast<char>('0' + digit)
                                             : digits[digit]);
  } while ((value >>= base_bits) != 0);
  return out;
}
// Formats an unsigned integer in the power of two base (binary, octal, hex).
// Writes into [out, out + num_digits) and returns the end of the range.
template <typename Char, typename UInt>
FMT_CONSTEXPR auto format_base2e(int base_bits, Char* out, UInt value,
                                 int num_digits, bool upper = false) -> Char* {
  do_format_base2e(base_bits, out, value, num_digits, upper);
  return out + num_digits;
}
// Back-insert-iterator overload: writes into the iterator's contiguous
// buffer when possible, otherwise formats into a local buffer and copies.
template <typename Char, typename OutputIt, typename UInt,
          FMT_ENABLE_IF(is_back_insert_iterator<OutputIt>::value)>
FMT_CONSTEXPR inline auto format_base2e(int base_bits, OutputIt out, UInt value,
                                        int num_digits, bool upper = false)
    -> OutputIt {
  if (auto ptr = to_pointer<Char>(out, to_unsigned(num_digits))) {
    format_base2e(base_bits, ptr, value, num_digits, upper);
    return out;
  }
  // Make buffer large enough for any base.
  char buffer[num_bits<UInt>()];
  // Pre-fill so array reads are defined during constant evaluation.
  if (is_constant_evaluated()) fill_n(buffer, sizeof(buffer), '\0');
  format_base2e(base_bits, buffer, value, num_digits, upper);
  return detail::copy_noinline<Char>(buffer, buffer + num_digits, out);
}
// A converter from UTF-8 to UTF-16.
class utf8_to_utf16 {
 private:
  // Converted data; size() - 1 below implies the constructor (format-inl.h)
  // appends a terminating NUL.
  basic_memory_buffer<wchar_t> buffer_;

 public:
  FMT_API explicit utf8_to_utf16(string_view s);

  inline operator basic_string_view<wchar_t>() const {
    return {&buffer_[0], size()};
  }
  // Size excluding the terminating NUL.
  inline auto size() const -> size_t { return buffer_.size() - 1; }
  inline auto c_str() const -> const wchar_t* { return &buffer_[0]; }
  inline auto str() const -> std::wstring { return {&buffer_[0], size()}; }
};
// What to do when invalid code units are encountered: fail the conversion or
// substitute U+FFFD.
enum class to_utf8_error_policy { abort, replace };

// A converter from UTF-16/UTF-32 (host endian) to UTF-8.
template <typename WChar, typename Buffer = memory_buffer> class to_utf8 {
 private:
  Buffer buffer_;  // UTF-8 output; NUL-terminated by convert()

 public:
  to_utf8() {}
  explicit to_utf8(basic_string_view<WChar> s,
                   to_utf8_error_policy policy = to_utf8_error_policy::abort) {
    static_assert(sizeof(WChar) == 2 || sizeof(WChar) == 4,
                  "Expect utf16 or utf32");
    if (!convert(s, policy))
      FMT_THROW(std::runtime_error(sizeof(WChar) == 2 ? "invalid utf16"
                                                      : "invalid utf32"));
  }
  operator string_view() const { return string_view(&buffer_[0], size()); }
  // Size excluding the terminating NUL.
  auto size() const -> size_t { return buffer_.size() - 1; }
  auto c_str() const -> const char* { return &buffer_[0]; }
  auto str() const -> std::string { return std::string(&buffer_[0], size()); }

  // Performs conversion returning a bool instead of throwing exception on
  // conversion error. This method may still throw in case of memory allocation
  // error.
  auto convert(basic_string_view<WChar> s,
               to_utf8_error_policy policy = to_utf8_error_policy::abort)
      -> bool {
    if (!convert(buffer_, s, policy)) return false;
    buffer_.push_back(0);  // NUL-terminate for c_str()
    return true;
  }
  // Appends the UTF-8 encoding of s to buf. Returns false on invalid input
  // when policy is abort; with replace, malformed surrogates become U+FFFD.
  static auto convert(Buffer& buf, basic_string_view<WChar> s,
                      to_utf8_error_policy policy = to_utf8_error_policy::abort)
      -> bool {
    for (auto p = s.begin(); p != s.end(); ++p) {
      uint32_t c = static_cast<uint32_t>(*p);
      if (sizeof(WChar) == 2 && c >= 0xd800 && c <= 0xdfff) {
        // Handle a surrogate pair.
        ++p;
        if (p == s.end() || (c & 0xfc00) != 0xd800 || (*p & 0xfc00) != 0xdc00) {
          if (policy == to_utf8_error_policy::abort) return false;
          // U+FFFD REPLACEMENT CHARACTER encoded in UTF-8.
          buf.append(string_view("\xEF\xBF\xBD"));
          --p;
          continue;
        } else {
          // Combine surrogates: 0x35fdc00 == (0xd800 << 10) + 0xdc00 - 0x10000,
          // folding both surrogate bases and the supplementary-plane offset.
          c = (c << 10) + static_cast<uint32_t>(*p) - 0x35fdc00;
        }
      }
      if (c < 0x80) {
        // 1-byte sequence (ASCII).
        buf.push_back(static_cast<char>(c));
      } else if (c < 0x800) {
        // 2-byte sequence.
        buf.push_back(static_cast<char>(0xc0 | (c >> 6)));
        buf.push_back(static_cast<char>(0x80 | (c & 0x3f)));
      } else if ((c >= 0x800 && c <= 0xd7ff) || (c >= 0xe000 && c <= 0xffff)) {
        // 3-byte sequence; the surrogate range U+D800..U+DFFF is excluded.
        buf.push_back(static_cast<char>(0xe0 | (c >> 12)));
        buf.push_back(static_cast<char>(0x80 | ((c & 0xfff) >> 6)));
        buf.push_back(static_cast<char>(0x80 | (c & 0x3f)));
      } else if (c >= 0x10000 && c <= 0x10ffff) {
        // 4-byte sequence for supplementary planes.
        buf.push_back(static_cast<char>(0xf0 | (c >> 18)));
        buf.push_back(static_cast<char>(0x80 | ((c & 0x3ffff) >> 12)));
        buf.push_back(static_cast<char>(0x80 | ((c & 0xfff) >> 6)));
        buf.push_back(static_cast<char>(0x80 | (c & 0x3f)));
      } else {
        return false;  // beyond the Unicode code space
      }
    }
    return true;
  }
};
// Computes 128-bit result of multiplication of two 64-bit unsigned integers.
inline auto umul128(uint64_t x, uint64_t y) noexcept -> uint128_fallback {
#if FMT_USE_INT128
  auto p = static_cast<uint128_opt>(x) * static_cast<uint128_opt>(y);
  return {static_cast<uint64_t>(p >> 64), static_cast<uint64_t>(p)};
#elif defined(_MSC_VER) && defined(_M_X64)
  auto hi = uint64_t();
  auto lo = _umul128(x, y, &hi);
  return {hi, lo};
#else
  // Portable path: schoolbook multiplication on 32-bit halves.
  const uint64_t mask = static_cast<uint64_t>(max_value<uint32_t>());
  uint64_t a = x >> 32;
  uint64_t b = x & mask;
  uint64_t c = y >> 32;
  uint64_t d = y & mask;
  uint64_t ac = a * c;
  uint64_t bc = b * c;
  uint64_t ad = a * d;
  uint64_t bd = b * d;
  // Sum of the middle 32-bit partial products; its high half carries into
  // the high word below.
  uint64_t intermediate = (bd >> 32) + (ad & mask) + (bc & mask);
  return {ac + (intermediate >> 32) + (ad >> 32) + (bc >> 32),
          (intermediate << 32) + (bd & mask)};
#endif
}
namespace dragonbox {
// Computes floor(log10(pow(2, e))) for e in [-2620, 2620] using the method from
// https://fmt.dev/papers/Dragonbox.pdf#page=28, section 6.1.
inline auto floor_log10_pow2(int e) noexcept -> int {
  FMT_ASSERT(e <= 2620 && e >= -2620, "too large exponent");
  // 315653 / 2^20 approximates log10(2) ~= 0.30103 on this range; the shift
  // must floor, hence arithmetic right shift is required.
  static_assert((-1 >> 1) == -1, "right shift is not arithmetic");
  return (e * 315653) >> 20;
}
// Computes floor(log2(pow(10, e))) for e in [-1233, 1233].
inline auto floor_log2_pow10(int e) noexcept -> int {
  FMT_ASSERT(e <= 1233 && e >= -1233, "too large exponent");
  // 1741647 / 2^19 approximates log2(10) ~= 3.32193.
  return (e * 1741647) >> 19;
}
// Computes upper 64 bits of multiplication of two 64-bit unsigned integers.
inline auto umul128_upper64(uint64_t x, uint64_t y) noexcept -> uint64_t {
#if FMT_USE_INT128
  auto p = static_cast<uint128_opt>(x) * static_cast<uint128_opt>(y);
  return static_cast<uint64_t>(p >> 64);
#elif defined(_MSC_VER) && defined(_M_X64)
  return __umulh(x, y);
#else
  // Portable fallback: take the high half of the full 128-bit product.
  return umul128(x, y).high();
#endif
}
// Computes upper 128 bits of multiplication of a 64-bit unsigned integer and a
// 128-bit unsigned integer.
inline auto umul192_upper128(uint64_t x, uint128_fallback y) noexcept
    -> uint128_fallback {
  uint128_fallback r = umul128(x, y.high());
  // The low partial product contributes only its upper 64 bits (plus carry).
  r += umul128_upper64(x, y.low());
  return r;
}
// Returns a cached power of 10 used by Dragonbox; defined in format-inl.h.
FMT_API auto get_cached_power(int k) noexcept -> uint128_fallback;
// Type-specific information that Dragonbox uses.
// The per-type constants (kappa, divisors, k range, tie thresholds) come from
// the Dragonbox algorithm; see https://github.com/jk-jeon/dragonbox.
template <typename T, typename Enable = void> struct float_info;

template <> struct float_info<float> {
  using carrier_uint = uint32_t;
  static const int exponent_bits = 8;
  static const int kappa = 1;
  static const int big_divisor = 100;
  static const int small_divisor = 10;
  static const int min_k = -31;
  static const int max_k = 46;
  static const int shorter_interval_tie_lower_threshold = -35;
  static const int shorter_interval_tie_upper_threshold = -35;
};

template <> struct float_info<double> {
  using carrier_uint = uint64_t;
  static const int exponent_bits = 11;
  static const int kappa = 2;
  static const int big_divisor = 1000;
  static const int small_divisor = 100;
  static const int min_k = -292;
  static const int max_k = 341;
  static const int shorter_interval_tie_lower_threshold = -77;
  static const int shorter_interval_tie_upper_threshold = -77;
};

// An 80- or 128-bit floating point number.
template <typename T>
struct float_info<T, enable_if_t<std::numeric_limits<T>::digits == 64 ||
                                 std::numeric_limits<T>::digits == 113 ||
                                 is_float128<T>::value>> {
  using carrier_uint = detail::uint128_t;
  static const int exponent_bits = 15;
};

// A double-double floating point number.
template <typename T>
struct float_info<T, enable_if_t<is_double_double<T>::value>> {
  using carrier_uint = detail::uint128_t;
};
// A decimal floating-point number: significand * pow(10, exponent).
template <typename T> struct decimal_fp {
  using significand_type = typename float_info<T>::carrier_uint;
  significand_type significand;
  int exponent;
};

// Converts x to decimal via the Dragonbox algorithm; defined in format-inl.h.
template <typename T> FMT_API auto to_decimal(T x) noexcept -> decimal_fp<T>;
}
// namespace dragonbox
// Returns true iff Float has the implicit bit which is not stored.
template <typename Float> constexpr auto has_implicit_bit() -> bool {
  // An 80-bit FP number has a 64-bit significand and no implicit bit.
  return std::numeric_limits<Float>::digits != 64;
}
// Returns the number of significand bits stored in Float. The implicit bit is
// not counted since it is not stored.
template <typename Float> constexpr auto num_significand_bits() -> int {
  // std::numeric_limits may not support __float128; 112 is the stored
  // significand width of IEEE binary128.
  return is_float128<Float>() ? 112
                              : (std::numeric_limits<Float>::digits -
                                 (has_implicit_bit<Float>() ? 1 : 0));
}
// Returns a mask covering the exponent field in Float's carrier integer
// representation (exponent_bits ones shifted past the significand).
template <typename Float>
constexpr auto exponent_mask()
    -> typename dragonbox::float_info<Float>::carrier_uint {
  using float_uint = typename dragonbox::float_info<Float>::carrier_uint;
  return ((float_uint(1) << dragonbox::float_info<Float>::exponent_bits) - 1)
         << num_significand_bits<Float>();
}
// Returns the exponent bias of Float's representation.
template <typename Float> constexpr auto exponent_bias() -> int {
  // std::numeric_limits may not support __float128; 16383 is the IEEE
  // binary128 bias.
  return is_float128<Float>() ? 16383
                              : std::numeric_limits<Float>::max_exponent - 1;
}
// Writes the exponent exp in the form "[+-]d{2,3}" to buffer.
template <typename Char, typename OutputIt>
FMT_CONSTEXPR auto write_exponent(int exp, OutputIt out) -> OutputIt {
  FMT_ASSERT(-10000 < exp && exp < 10000, "exponent out of range");
  // Sign is always written, '+' for non-negative exponents.
  if (exp < 0) {
    *out++ = static_cast<Char>('-');
    exp = -exp;
  } else {
    *out++ = static_cast<Char>('+');
  }
  auto uexp = static_cast<uint32_t>(exp);
  if (is_constant_evaluated()) {
    // Constexpr-friendly path; zero-pad to at least two digits.
    if (uexp < 10) *out++ = '0';
    return format_decimal<Char>(out, uexp, count_digits(uexp));
  }
  // Runtime path using the two-digit lookup table.
  if (uexp >= 100u) {
    const char* top = digits2(uexp / 100);
    // The leading digit of a 3-digit exponent would be '0'; skip it.
    if (uexp >= 1000u) *out++ = static_cast<Char>(top[0]);
    *out++ = static_cast<Char>(top[1]);
    uexp %= 100;
  }
  const char* d = digits2(uexp);
  *out++ = static_cast<Char>(d[0]);
  *out++ = static_cast<Char>(d[1]);
  return out;
}
// A floating-point number f * pow(2, e) where F is an unsigned type.
template <typename F> struct basic_fp {
  F f;    // significand
  int e;  // binary exponent

  // Total bit width of the significand type F.
  static constexpr const int num_significand_bits =
      static_cast<int>(sizeof(F) * num_bits<unsigned char>());

  constexpr basic_fp() : f(0), e(0) {}
  constexpr basic_fp(uint64_t f_val, int e_val) : f(f_val), e(e_val) {}

  // Constructs fp from an IEEE754 floating-point number.
  template <typename Float> FMT_CONSTEXPR basic_fp(Float n) { assign(n); }

  // Assigns n to this and return true iff predecessor is closer than successor.
  template <typename Float, FMT_ENABLE_IF(!is_double_double<Float>::value)>
  FMT_CONSTEXPR auto assign(Float n) -> bool {
    static_assert(std::numeric_limits<Float>::digits <= 113, "unsupported FP");
    // Assume Float is in the format [sign][exponent][significand].
    using carrier_uint = typename dragonbox::float_info<Float>::carrier_uint;
    const auto num_float_significand_bits =
        detail::num_significand_bits<Float>();
    const auto implicit_bit = carrier_uint(1) << num_float_significand_bits;
    const auto significand_mask = implicit_bit - 1;
    // Reinterpret the bits and split off the significand and biased exponent.
    auto u = bit_cast<carrier_uint>(n);
    f = static_cast<F>(u & significand_mask);
    auto biased_e = static_cast<int>((u & exponent_mask<Float>()) >>
                                     num_float_significand_bits);
    // The predecessor is closer if n is a normalized power of 2 (f == 0)
    // other than the smallest normalized number (biased_e > 1).
    auto is_predecessor_closer = f == 0 && biased_e > 1;
    if (biased_e == 0)
      biased_e = 1;  // Subnormals use biased exponent 1 (min exponent).
    else if (has_implicit_bit<Float>())
      f += static_cast<F>(implicit_bit);  // restore the hidden leading 1
    e = biased_e - exponent_bias<Float>() - num_float_significand_bits;
    if (!has_implicit_bit<Float>()) ++e;
    return is_predecessor_closer;
  }

  // Double-double (e.g. PowerPC long double): use only the high double part.
  template <typename Float, FMT_ENABLE_IF(is_double_double<Float>::value)>
  FMT_CONSTEXPR auto assign(Float n) -> bool {
    static_assert(std::numeric_limits<double>::is_iec559, "unsupported FP");
    return assign(static_cast<double>(n));
  }
};
// The fp type used by the floating-point formatting algorithms below:
// a 64-bit significand.
using fp = basic_fp<unsigned long long>;
// Normalizes the value converted from double and multiplied by (1 << SHIFT).
template <int SHIFT = 0, typename F>
FMT_CONSTEXPR auto normalize(basic_fp<F> value) -> basic_fp<F> {
  // Handle subnormals: shift left until the (shifted) implicit bit is set.
  const auto implicit_bit = F(1) << num_significand_bits<double>();
  const auto shifted_implicit_bit = implicit_bit << SHIFT;
  while ((value.f & shifted_implicit_bit) == 0) {
    value.f <<= 1;
    --value.e;
  }
  // Subtract 1 to account for hidden bit.
  const auto offset = basic_fp<F>::num_significand_bits -
                      num_significand_bits<double>() - SHIFT - 1;
  // Move the significand into the top bits of F, adjusting e to compensate.
  value.f <<= offset;
  value.e -= offset;
  return value;
}
// Computes lhs * rhs / pow(2, 64) rounded to nearest with half-up tie breaking.
FMT_CONSTEXPR inline auto multiply(uint64_t lhs, uint64_t rhs) -> uint64_t {
#if FMT_USE_INT128
  auto product = static_cast<__uint128_t>(lhs) * rhs;
  auto f = static_cast<uint64_t>(product >> 64);
  // Round up when the top bit of the discarded low half is set.
  return (static_cast<uint64_t>(product) & (1ULL << 63)) != 0 ? f + 1 : f;
#else
  // Multiply 32-bit parts of significands.
  uint64_t mask = (1ULL << 32) - 1;
  uint64_t a = lhs >> 32, b = lhs & mask;
  uint64_t c = rhs >> 32, d = rhs & mask;
  uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d;
  // Compute mid 64-bit of result and round (the added 1 << 31 implements
  // half-up rounding of the discarded bits).
  uint64_t mid = (bd >> 32) + (ad & mask) + (bc & mask) + (1U << 31);
  return ac + (ad >> 32) + (bc >> 32) + (mid >> 32);
#endif
}
// Multiplies two fp values; the +64 in the exponent accounts for the
// division by pow(2, 64) performed inside multiply().
FMT_CONSTEXPR inline auto operator*(fp x, fp y) -> fp {
  return {multiply(x.f, y.f), x.e + y.e + 64};
}
// The type T is converted to before formatting: float, and any type with the
// same bit width as double, is handled as double; other types pass through.
template <typename T,
          bool doublish = num_bits<T>() == num_bits<double>()>
using convert_float_result =
    conditional_t<std::is_same<T, float>::value || doublish, double, T>;

template <typename T>
constexpr auto convert_float(T value) -> convert_float_result<T> {
  return static_cast<convert_float_result<T>>(value);
}
// Writes the fill character/string from specs to it, n times.
template <typename Char, typename OutputIt>
FMT_NOINLINE FMT_CONSTEXPR auto fill(OutputIt it, size_t n,
                                     const basic_specs& specs) -> OutputIt {
  auto fill_size = specs.fill_size();
  // Fast path: the fill is a single code unit.
  if (fill_size == 1) return detail::fill_n(it, n, specs.fill_unit<Char>());
  // Multi-code-unit fill: copy the fill string n times.
  if (const Char* data = specs.fill<Char>()) {
    for (size_t i = 0; i < n; ++i) it = copy<Char>(data, data + fill_size, it);
  }
  return it;
}
// Writes the output of f, padded according to format specifications in specs.
// size: output size in code units.
// width: output display width in (terminal) column positions.
template <typename Char, align default_align = align::left, typename OutputIt,
          typename F>
FMT_CONSTEXPR auto write_padded(OutputIt out, const format_specs& specs,
                                size_t size, size_t width, F&& f) -> OutputIt {
  static_assert(default_align == align::left || default_align == align::right,
                "");
  unsigned spec_width = to_unsigned(specs.width);
  size_t padding = spec_width > width ? spec_width - width : 0;
  // Shifts are encoded as string literals because static constexpr is not
  // supported in constexpr functions.
  // Each byte, indexed by specs.align(), is the right-shift applied to
  // `padding` to get the left padding: 0x1f shifts everything out (no left
  // padding), 0x00 keeps it all, 0x01 halves it for centering.
  auto* shifts =
      default_align == align::left ? "\x1f\x1f\x00\x01" : "\x00\x1f\x00\x01";
  size_t left_padding = padding >> shifts[static_cast<int>(specs.align())];
  size_t right_padding = padding - left_padding;
  // Reserve room for the content plus all fill code units up front.
  auto it = reserve(out, size + padding * specs.fill_size());
  if (left_padding != 0) it = fill<Char>(it, left_padding, specs);
  it = f(it);
  if (right_padding != 0) it = fill<Char>(it, right_padding, specs);
  return base_iterator(out, it);
}
// Convenience overload for the common case where the display width equals
// the code unit count.
template <typename Char, align default_align = align::left, typename OutputIt,
          typename F>
constexpr auto write_padded(OutputIt out, const format_specs& specs,
                            size_t size, F&& f) -> OutputIt {
  return write_padded<Char, default_align>(out, specs, size, size, f);
}
template <
typename Char, align default_align = align::left,
typename OutputIt>
FMT_CONSTEXPR
auto write_bytes(OutputIt out, string_view bytes,
const format_specs& specs = {}) -> OutputIt {
return write_padded<
Char, default_align>(
--> --------------------
--> maximum size reached
--> --------------------