/* Include file for internal GNU MP types and definitions.
THE CONTENTS OF THIS FILE ARE FOR INTERNAL USE AND ARE ALMOST CERTAIN TO BE SUBJECT TO INCOMPATIBLE CHANGES IN FUTURE GNU MP RELEASES.
Copyright 1991-2018, 2021, 2022 Free Software Foundation, Inc.
This file is part of the GNU MP Library.
The GNU MP Library is free software; you can redistribute it and/or modify it under the terms of either:
* the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
or
* the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
or both in parallel, as here.
The GNU MP Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received copies of the GNU General Public License and the GNU Lesser General Public License along with the GNU MP Library. If not,
see https://www.gnu.org/licenses/. */
/* __GMP_DECLSPEC must be given on any global data that will be accessed from outside libgmp, meaning from the test or development programs, or from libgmpxx. Failing to do this will result in an incorrect address being used for the accesses. On functions __GMP_DECLSPEC makes calls from outside libgmp more efficient, but they'll still work fine without
it. */
#ifndef __GMP_IMPL_H__
#define __GMP_IMPL_H__

/* Cray systems provide _popcnt as a compiler intrinsic.  */
#if defined (_CRAY)
#include <intrinsics.h>  /* for _popcnt */
#endif
/* For INT_MAX, etc. We used to avoid it because of a bug (on solaris, gcc 2.95 under -mcpu=ultrasparc in ABI=32 ends up getting wrong values (the ABI=64 values)), but it should be safe now.
On Cray vector systems, however, we need the system limits.h since sizes of signed and unsigned types can differ there, depending on compiler options (eg. -hnofastmd), making our SHRT_MAX etc expressions fail. For reference, int can be 46 or 64 bits, whereas uint is always 64 bits; and
short can be 24, 32, 46 or 64 bits, and different for ushort. */
#include <limits.h>
/* For fat.h and other fat binary stuff.
   No need for __GMP_ATTRIBUTE_PURE or __GMP_NOTHROW, since functions
   declared this way are only used to set function pointers in
   __gmpn_cpuvec, they're not called directly.  */
#define DECL_add_n(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
#define DECL_addlsh1_n(name) \
  DECL_add_n (name)
#define DECL_addlsh2_n(name) \
  DECL_add_n (name)
#define DECL_addmul_1(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
#define DECL_addmul_2(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr)
#define DECL_bdiv_dbm1c(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)
#define DECL_cnd_add_n(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_limb_t, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
#define DECL_cnd_sub_n(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_limb_t, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
#define DECL_com(name) \
  __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t)
#define DECL_copyd(name) \
  __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t)
#define DECL_copyi(name) \
  DECL_copyd (name)
#define DECL_divexact_1(name) \
  __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
#define DECL_divexact_by3c(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
#define DECL_divrem_1(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t)
#define DECL_gcd_11(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_limb_t, mp_limb_t)
#define DECL_lshift(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_srcptr, mp_size_t, unsigned)
#define DECL_lshiftc(name) \
  DECL_lshift (name)
#define DECL_mod_1(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t)
#define DECL_mod_1_1p(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [])
#define DECL_mod_1_1p_cps(name) \
  __GMP_DECLSPEC void name (mp_limb_t cps[], mp_limb_t b)
#define DECL_mod_1s_2p(name) \
  DECL_mod_1_1p (name)
#define DECL_mod_1s_2p_cps(name) \
  DECL_mod_1_1p_cps (name)
#define DECL_mod_1s_4p(name) \
  DECL_mod_1_1p (name)
#define DECL_mod_1s_4p_cps(name) \
  DECL_mod_1_1p_cps (name)
#define DECL_mod_34lsub1(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t)
#define DECL_modexact_1c_odd(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)
#define DECL_mul_1(name) \
  DECL_addmul_1 (name)
#define DECL_mul_basecase(name) \
  __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t)
#define DECL_mullo_basecase(name) \
  __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t)
#define DECL_preinv_divrem_1(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t, int)
#define DECL_preinv_mod_1(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t)
#define DECL_redc_1(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t)
#define DECL_redc_2(name) \
  __GMP_DECLSPEC mp_limb_t name (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr)
#define DECL_rshift(name) \
  DECL_lshift (name)
#define DECL_sqr_basecase(name) \
  __GMP_DECLSPEC void name (mp_ptr, mp_srcptr, mp_size_t)
#define DECL_sub_n(name) \
  DECL_add_n (name)
#define DECL_sublsh1_n(name) \
  DECL_add_n (name)
#define DECL_submul_1(name) \
  DECL_addmul_1 (name)
#if HAVE_INTTYPES_H      /* for uint_least32_t */
# include <inttypes.h>
#endif
/* On some platforms inttypes.h exists but is incomplete
   and we still need stdint.h.  */
#if HAVE_STDINT_H
# include <stdint.h>
#endif

#ifdef __cplusplus
#include <cstring>  /* for strlen */
#include <string>   /* for std::string */
#endif

/* Default off, so TMP_ALLOC_LIMBS_2 and others compile cleanly.  */
#ifndef WANT_TMP_DEBUG
#define WANT_TMP_DEBUG 0
#endif
/* The following tries to get a good version of alloca. The tests are adapted from autoconf AC_FUNC_ALLOCA, with a couple of additions. Whether this succeeds is tested by GMP_FUNC_ALLOCA and HAVE_ALLOCA will be setup appropriately.
ifndef alloca - a cpp define might already exist. glibc <stdlib.h> includes <alloca.h> which uses GCC __builtin_alloca. HP cc +Olibcalls adds a #define of alloca to __builtin_alloca.
_AIX pragma - IBM compilers need a #pragma in "each module that needs to use alloca". Pragma indented to protect pre-ANSI cpp's. _IBMR2 was used in past versions of GMP, retained still in case it matters.
The autoconf manual says this pragma needs to be at the start of a C file, apart from comments and preprocessor directives. Is that true? xlc on aix 4.xxx doesn't seem to mind it being after prototypes etc from gmp.h.
*/
/* Limb size, if not provided by gmp-mparam.h.  */
#ifndef GMP_LIMB_BYTES
#define GMP_LIMB_BYTES  SIZEOF_MP_LIMB_T
#endif
#ifndef GMP_LIMB_BITS
#define GMP_LIMB_BITS  (8 * SIZEOF_MP_LIMB_T)
#endif

#define BITS_PER_ULONG  (8 * SIZEOF_UNSIGNED_LONG)
/* gmp_uint_least32_t is an unsigned integer type with at least 32 bits.
   Prefer the standard uint_least32_t when available, otherwise pick the
   smallest builtin unsigned type known (via configure) to be wide enough.  */
#if HAVE_UINT_LEAST32_T
typedef uint_least32_t      gmp_uint_least32_t;
#else
#if SIZEOF_UNSIGNED_SHORT >= 4
typedef unsigned short      gmp_uint_least32_t;
#else
#if SIZEOF_UNSIGNED >= 4
typedef unsigned            gmp_uint_least32_t;
#else
typedef unsigned long       gmp_uint_least32_t;
#endif
#endif
#endif
/* gmp_intptr_t, for pointer to integer casts.  */
#if HAVE_INTPTR_T
typedef intptr_t            gmp_intptr_t;
#else /* fallback */
typedef size_t              gmp_intptr_t;
#endif
/* pre-inverse types for truncating division and modulo */ typedefstruct {mp_limb_t inv32;} gmp_pi1_t; typedefstruct {mp_limb_t inv21, inv32, inv53;} gmp_pi2_t;
/* "const" basically means a function does nothing but examine its arguments
   and give a return value, it doesn't read or write any memory (neither
   global nor pointed to by arguments), and has no other side-effects.  This
   is more restrictive than "pure".  See info node "(gcc)Function
   Attributes".  __GMP_NO_ATTRIBUTE_CONST_PURE lets tune/common.c etc turn
   this off when trying to write timing loops.  */
#if HAVE_ATTRIBUTE_CONST && ! defined (__GMP_NO_ATTRIBUTE_CONST_PURE)
#define ATTRIBUTE_CONST  __attribute__ ((const))
#else
#define ATTRIBUTE_CONST
#endif
/* "malloc" means a function behaves like malloc in that the pointer it
   returns doesn't alias anything.  */
#if HAVE_ATTRIBUTE_MALLOC
#define ATTRIBUTE_MALLOC  __attribute__ ((malloc))
#else
#define ATTRIBUTE_MALLOC
#endif
/* va_copy is standard in C99, and gcc provides __va_copy when in strict
   C89 mode.  Falling back to a memcpy will give maximum portability, since
   it works no matter whether va_list is a pointer, struct or array.  */
#if ! defined (va_copy) && defined (__va_copy)
#define va_copy(dst,src)  __va_copy(dst,src)
#endif
#if ! defined (va_copy)
#define va_copy(dst,src) \
  do { memcpy (&(dst), &(src), sizeof (va_list)); } while (0)
#endif
/* HAVE_HOST_CPU_alpha_CIX is 1 on an alpha with the CIX instructions
   (ie. ctlz, ctpop, cttz).  */
#if HAVE_HOST_CPU_alphaev67 || HAVE_HOST_CPU_alphaev68  \
  || HAVE_HOST_CPU_alphaev7
#define HAVE_HOST_CPU_alpha_CIX 1
#endif
/* Small allocations should use TMP_SALLOC, big allocations should use
   TMP_BALLOC.  Allocations that might be small or big should use TMP_ALLOC.

   Functions that use just TMP_SALLOC should use TMP_SDECL, TMP_SMARK, and
   TMP_SFREE.

   TMP_DECL just declares a variable, but might be empty and so must be
   last in a list of variables.  TMP_MARK must be done before any
   TMP_ALLOC.  TMP_ALLOC(0) is not allowed.  TMP_FREE doesn't need to be
   done if a TMP_MARK was made, but then no TMP_ALLOCs.  */
/* The alignment in bytes, used for TMP_ALLOCed blocks, when alloca or
   __gmp_allocate_func doesn't already determine it.  The union covers the
   strictest-aligned types a TMP block may hold.  */
union tmp_align_t {
  mp_limb_t  l;
  double     d;
  char      *p;
};
#define __TMP_ALIGN  sizeof (union tmp_align_t)
/* Return "a" rounded upwards to a multiple of "m", if it isn't already.
   "a" must be an unsigned type.  This is designed for use with a
   compile-time constant "m".  The POW2 case is expected to be usual, and
   gcc 3.0 and up recognises "(-(8*n))%8" or the like is always zero, which
   means the rounding up in the WANT_TMP_NOTREENTRANT version of TMP_ALLOC
   below will be a noop.

   NOTE: both "a" and "m" may be evaluated more than once, so pass only
   side-effect-free expressions.  */
#define ROUND_UP_MULTIPLE(a,m) \
  (POW2_P(m) ? (a) + (-(a))%(m) \
   : (a)+(m)-1 - (((a)+(m)-1) % (m)))
/* It's more efficient to allocate one block than many.  This is certainly
   true of the malloc methods, but it can even be true of alloca if that
   involves copying a chunk of stack (various RISCs), or a call to a stack
   bounds check (mingw).  In any case, when debugging keep separate blocks
   so a redzoning malloc debugger can protect each individually.  */
#define TMP_ALLOC_LIMBS_2(xp,xsize, yp,ysize)                   \
  do {                                                          \
    if (WANT_TMP_DEBUG)                                         \
      {                                                         \
        (xp) = TMP_ALLOC_LIMBS (xsize);                         \
        (yp) = TMP_ALLOC_LIMBS (ysize);                         \
      }                                                         \
    else                                                        \
      {                                                         \
        (xp) = TMP_ALLOC_LIMBS ((xsize) + (ysize));             \
        (yp) = (xp) + (xsize);                                  \
      }                                                         \
  } while (0)

#define TMP_ALLOC_LIMBS_3(xp,xsize, yp,ysize, zp,zsize)         \
  do {                                                          \
    if (WANT_TMP_DEBUG)                                         \
      {                                                         \
        (xp) = TMP_ALLOC_LIMBS (xsize);                         \
        (yp) = TMP_ALLOC_LIMBS (ysize);                         \
        (zp) = TMP_ALLOC_LIMBS (zsize);                         \
      }                                                         \
    else                                                        \
      {                                                         \
        (xp) = TMP_ALLOC_LIMBS ((xsize) + (ysize) + (zsize));   \
        (yp) = (xp) + (xsize);                                  \
        (zp) = (yp) + (ysize);                                  \
      }                                                         \
  } while (0)
/* Non-zero if "n" is a power of 2.  n-1 inverts any low zeros and the
   lowest one bit.  If n&(n-1) leaves zero then that lowest one bit must
   have been the only bit set.  n==0 will return true though, so avoid
   that.  */
#define POW2_P(n) (((n) & ((n) - 1)) == 0)
/* Highest bit of each unsigned type, eg. 0x80000000 for a 32-bit ulong.
   Must cast ULONG_MAX etc to unsigned long etc, since they might not be
   unsigned on a K&R compiler.  In particular the HP-UX 10 bundled K&R cc
   treats the plain decimal values in <limits.h> as signed.  */
#define ULONG_HIGHBIT      (ULONG_MAX ^ ((unsigned long) ULONG_MAX >> 1))
#define UINT_HIGHBIT       (UINT_MAX ^ ((unsigned) UINT_MAX >> 1))
#define USHRT_HIGHBIT      (USHRT_MAX ^ ((unsigned short) USHRT_MAX >> 1))
#define GMP_LIMB_HIGHBIT   (MP_LIMB_T_MAX ^ (MP_LIMB_T_MAX >> 1))
/* Dummy for non-gcc, code involving it will go dead.  */
#if ! defined (__GNUC__) || __GNUC__ < 2
#define __builtin_constant_p(x)   0
#endif
/* In gcc 2.96 and up on i386, tail calls are optimized to jumps if the stack usage is compatible. __attribute__ ((regparm (N))) helps by putting leading parameters in registers, avoiding extra stack.
regparm cannot be used with calls going through the PLT, because the binding code there may clobber the registers (%eax, %edx, %ecx) used for the regparm parameters. Calls to local (ie. static) functions could still use this, if we cared to differentiate locals and globals.
On athlon-unknown-freebsd4.9 with gcc 3.3.3, regparm cannot be used with -p or -pg profiling, since that version of gcc doesn't realize the .mcount calls will clobber the parameter registers. Other systems are ok, like debian with glibc 2.3.2 (mcount doesn't clobber), but we don't bother to try to detect this. regparm is only an optimization so we just
disable it when profiling (profiling being a slowdown anyway). */
/* Macros for altering parameter order according to regparm usage.  With
   regparm, the extra parameter "x" is moved to the front so it lands in a
   register; without it, "x" stays trailing.  */
#if USE_LEADING_REGPARM
#define REGPARM_2_1(a,b,x)    x,a,b
#define REGPARM_3_1(a,b,c,x)  x,a,b,c
#define REGPARM_ATTR(n)  __attribute__ ((regparm (n)))
#else
#define REGPARM_2_1(a,b,x)    a,b,x
#define REGPARM_3_1(a,b,c,x)  a,b,c,x
#define REGPARM_ATTR(n)
#endif
/* ASM_L gives a local label for a gcc asm block, for use when temporary local labels like "1:" might not be available, which is the case for instance on the x86s (the SCO assembler doesn't support them).
The label generated is made unique by including "%=" which is a unique number for each insn. This ensures the same name can be used in multiple asm blocks, perhaps via a macro. Since jumps between asm blocks are not allowed there's no need for a label to be usable outside a single
block. */
/* "%=" expands to a number unique to each asm insn, keeping the label
   distinct across multiple expansions.  */
#define ASM_L(name) LSYM_PREFIX "asm_%=_"#name
#if defined (__GNUC__) && HAVE_HOST_CPU_FAMILY_x86
#if 0
/* FIXME: Check that these actually improve things.
   FIXME: Need a cld after each std.
   FIXME: Can't have inputs in clobbered registers, must describe them as
   dummy outputs, and add volatile.  */
#define MPN_COPY_INCR(DST, SRC, N)                                      \
  __asm__ ("cld\n\trep\n\tmovsl" : :                                    \
           "D" (DST), "S" (SRC), "c" (N) :                              \
           "cx", "di", "si", "memory")
#define MPN_COPY_DECR(DST, SRC, N)                                      \
  __asm__ ("std\n\trep\n\tmovsl" : :                                    \
           "D" ((DST) + (N) - 1), "S" ((SRC) + (N) - 1), "c" (N) :      \
           "cx", "di", "si", "memory")
#endif
#endif
#ifndef mpn_addmul_2 /* if not done with cpuvec in a fat binary */ #define mpn_addmul_2 __MPN(addmul_2)
__GMP_DECLSPEC mp_limb_t mpn_addmul_2 (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr); #endif
/* Alternative entry point in mpn_addmul_2 for the benefit of mpn_sqr_basecase. */ #define mpn_addmul_2s __MPN(addmul_2s)
__GMP_DECLSPEC mp_limb_t mpn_addmul_2s (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr);
/* Override mpn_addlsh1_n, mpn_addlsh2_n, mpn_sublsh1_n, etc with mpn_addlsh_n, etc when !HAVE_NATIVE the former but HAVE_NATIVE_ the latter. Similarly, override foo_ip1 functions with foo. We then lie and say these macros represent native functions, but leave a trace by using the value 2 rather
than 1. */
#ifndef mpn_lshiftc /* if not done with cpuvec in a fat binary */ #define mpn_lshiftc __MPN(lshiftc)
__GMP_DECLSPEC mp_limb_t mpn_lshiftc (mp_ptr, mp_srcptr, mp_size_t, unsignedint); #endif
#ifndef mpn_mul_basecase /* if not done with cpuvec in a fat binary */ #define mpn_mul_basecase __MPN(mul_basecase)
__GMP_DECLSPEC void mpn_mul_basecase (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); #endif
#ifndef mpn_mullo_basecase /* if not done with cpuvec in a fat binary */ #define mpn_mullo_basecase __MPN(mullo_basecase)
__GMP_DECLSPEC void mpn_mullo_basecase (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); #endif
#ifndef mpn_sqr_basecase /* if not done with cpuvec in a fat binary */ #define mpn_sqr_basecase __MPN(sqr_basecase)
__GMP_DECLSPEC void mpn_sqr_basecase (mp_ptr, mp_srcptr, mp_size_t); #endif
#ifndef mpn_redc_1 /* if not done with cpuvec in a fat binary */ #define mpn_redc_1 __MPN(redc_1)
__GMP_DECLSPEC mp_limb_t mpn_redc_1 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); #endif
#ifndef mpn_redc_2 /* if not done with cpuvec in a fat binary */ #define mpn_redc_2 __MPN(redc_2)
__GMP_DECLSPEC mp_limb_t mpn_redc_2 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr); #endif
#ifndef mpn_mod_1_1p_cps /* if not done with cpuvec in a fat binary */ #define mpn_mod_1_1p_cps __MPN(mod_1_1p_cps)
__GMP_DECLSPEC void mpn_mod_1_1p_cps (mp_limb_t [4], mp_limb_t); #endif #ifndef mpn_mod_1_1p /* if not done with cpuvec in a fat binary */ #define mpn_mod_1_1p __MPN(mod_1_1p)
__GMP_DECLSPEC mp_limb_t mpn_mod_1_1p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [4]) __GMP_ATTRIBUTE_PURE; #endif
#ifndef mpn_mod_1s_2p_cps /* if not done with cpuvec in a fat binary */ #define mpn_mod_1s_2p_cps __MPN(mod_1s_2p_cps)
__GMP_DECLSPEC void mpn_mod_1s_2p_cps (mp_limb_t [5], mp_limb_t); #endif #ifndef mpn_mod_1s_2p /* if not done with cpuvec in a fat binary */ #define mpn_mod_1s_2p __MPN(mod_1s_2p)
__GMP_DECLSPEC mp_limb_t mpn_mod_1s_2p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [5]) __GMP_ATTRIBUTE_PURE; #endif
#ifndef mpn_mod_1s_3p_cps /* if not done with cpuvec in a fat binary */ #define mpn_mod_1s_3p_cps __MPN(mod_1s_3p_cps)
__GMP_DECLSPEC void mpn_mod_1s_3p_cps (mp_limb_t [6], mp_limb_t); #endif #ifndef mpn_mod_1s_3p /* if not done with cpuvec in a fat binary */ #define mpn_mod_1s_3p __MPN(mod_1s_3p)
__GMP_DECLSPEC mp_limb_t mpn_mod_1s_3p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [6]) __GMP_ATTRIBUTE_PURE; #endif
#ifndef mpn_mod_1s_4p_cps /* if not done with cpuvec in a fat binary */ #define mpn_mod_1s_4p_cps __MPN(mod_1s_4p_cps)
__GMP_DECLSPEC void mpn_mod_1s_4p_cps (mp_limb_t [7], mp_limb_t); #endif #ifndef mpn_mod_1s_4p /* if not done with cpuvec in a fat binary */ #define mpn_mod_1s_4p __MPN(mod_1s_4p)
__GMP_DECLSPEC mp_limb_t mpn_mod_1s_4p (mp_srcptr, mp_size_t, mp_limb_t, const mp_limb_t [7]) __GMP_ATTRIBUTE_PURE; #endif
/* Macro to obtain a void pointer to the function pointers structure
   (cast to gmp_randfnptr_t * by _gmp_rand below).  */
#define RNG_FNPTR(rstate) ((rstate)->_mp_algdata._mp_lc)

/* Macro to obtain a pointer to the generator's state.
   When used as a lvalue the rvalue needs to be cast to mp_ptr.  */
#define RNG_STATE(rstate) ((rstate)->_mp_seed->_mp_d)
/* Write a given number of random bits to rp, using the generator's
   randget_fn function pointer.  */
#define _gmp_rand(rp, state, bits)                              \
  do {                                                          \
    gmp_randstate_ptr  __rstate = (state);                      \
    (*((gmp_randfnptr_t *) RNG_FNPTR (__rstate))->randget_fn)   \
      (__rstate, rp, bits);                                     \
  } while (0)
/* __gmp_rands is the global state for the old-style random functions, and is also used in the test programs (hence the __GMP_DECLSPEC).
There's no seeding here, so mpz_random etc will generate the same sequence every time. This is not unlike the C library random functions if you don't seed them, so perhaps it's acceptable. Digging up a seed from /dev/random or the like would work on many systems, but might encourage a false confidence, since it'd be pretty much impossible to do something that would work reliably everywhere. In any case the new style functions are recommended to applications which care about randomness, so
the old functions aren't too important. */
/* This is used by the test programs, to free memory held by the global
   random state (no-op if it was never initialized).  */
#define RANDS_CLEAR()                   \
  do {                                  \
    if (__gmp_rands_initialized)        \
      {                                 \
        __gmp_rands_initialized = 0;    \
        gmp_randclear (__gmp_rands);    \
      }                                 \
  } while (0)
/* For a threshold between algorithms A and B, size>=thresh is where B should be used. Special value MP_SIZE_T_MAX means only ever use A, or value 0 means only ever use B. The tests for these special values will be compile-time constants, so the compiler should be able to eliminate
the code for the unwanted algorithm. */
/* The minimal supported value for Toom22 depends also on Toom32 and
   Toom42 implementations.  */
#define MPN_TOOM22_MUL_MINSIZE    6
#define MPN_TOOM2_SQR_MINSIZE     4