/*
* mpi.c
*
* Arbitrary precision integer arithmetic library
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "mpi-priv.h"
#include "mplogic.h"
#include <assert.h>
/* 16-bit Thumb or ARM v3 can't use the inline assembler versions.
 * NOTE: the continuation backslashes are required — without them each
 * fragment of the condition is its own (malformed) preprocessor line. */
#if defined(__arm__) && \
    ((defined(__thumb__) && !defined(__thumb2__)) || defined(__ARM_ARCH_3__))
#undef MP_ASSEMBLY_MULTIPLY
#undef MP_ASSEMBLY_SQUARE
#endif
#if MP_LOGTAB
/*
A table of the logs of 2 for various bases (the 0 and 1 entries of
this table are meaningless and should not be referenced).
This table is used to compute output lengths for the mp_toradix()
function. Since a number n in radix r takes up about log_r(n)
digits, we estimate the output size by taking the least integer
greater than log_r(n), where:
log_r(n) = log_2(n) * log_r(2)
This table, therefore, is a table of log_r(2) for 2 <= r <= 36,
which are the output bases supported.
*/
#include "logtab.h"
#endif
#ifdef CT_VERIF
#include <valgrind/memcheck.h>
#endif
/* {{{ Constant strings */
/* Constant strings returned by mp_strerror() */
/* Human-readable messages indexed by negated mp_err code; consumed by
 * mp_strerror().  Index 0 is the fallback for unrecognized codes. */
static const char *mp_err_string[] = {
    "unknown result code",     /* say what? */
    "boolean true",            /* MP_OKAY, MP_YES */
    "boolean false",           /* MP_NO */
    "out of memory",           /* MP_MEM */
    "argument out of range",   /* MP_RANGE */
    "invalid input parameter", /* MP_BADARG */
    "result is undefined"      /* MP_UNDEF */
};
/* Value to digit maps for radix conversion */
/* s_dmap_1 - standard digits and letters */
/* Maps digit values 0..63 to their output characters; only the first
 * `radix` characters are meaningful for a given conversion. */
static const char *s_dmap_1 =
    "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz+/";
/* }}} */
/* {{{ Default precision manipulation */
/* Default precision for newly created mp_int's */
static mp_size s_mp_defprec = MP_DEFPREC;
mp_size
mp_get_prec(void)
{
    /* Report the default precision used for newly created mp_ints. */
    return s_mp_defprec;
}
/* end mp_get_prec() */
void
mp_set_prec(mp_size prec)
{
    /* A precision of zero restores the library default. */
    s_mp_defprec = (prec == 0) ? MP_DEFPREC : prec;
}
/* end mp_set_prec() */
/* }}} */
#ifdef CT_VERIF
void
mp_taint(mp_int *mp)
{
    /* Mark every used digit as undefined so valgrind reports any
     * control flow or memory access that depends on its value. */
    size_t ix;

    for (ix = 0; ix < mp->used; ++ix)
        VALGRIND_MAKE_MEM_UNDEFINED(&(mp->dp[ix]), sizeof(mp_digit));
}
void
mp_untaint(mp_int *mp)
{
    /* Undo mp_taint(): mark every used digit as defined again. */
    size_t ix;

    for (ix = 0; ix < mp->used; ++ix)
        VALGRIND_MAKE_MEM_DEFINED(&(mp->dp[ix]), sizeof(mp_digit));
}
#endif
/*------------------------------------------------------------------------*/
/* {{{ mp_init(mp) */
/*
mp_init(mp)
Initialize a new zero-valued mp_int. Returns MP_OKAY if successful,
MP_MEM if memory could not be allocated for the structure.
*/
mp_err
mp_init(mp_int *mp)
{
    /* Initialize to zero using the library-wide default precision. */
    return mp_init_size(mp, s_mp_defprec);
}
/* end mp_init() */
/* }}} */
/* {{{ mp_init_size(mp, prec) */
/*
mp_init_size(mp, prec)
Initialize a new zero-valued mp_int with at least the given
precision; returns MP_OKAY if successful, or MP_MEM if memory could
not be allocated for the structure.
*/
mp_err
mp_init_size(mp_int *mp, mp_size prec)
{
    mp_digit *digits;

    ARGCHK(mp != NULL && prec > 0, MP_BADARG);

    /* Round the request up to a multiple of the default precision. */
    prec = MP_ROUNDUP(prec, s_mp_defprec);

    digits = s_mp_alloc(prec, sizeof(mp_digit));
    if (digits == NULL)
        return MP_MEM;

    /* Canonical zero: positive sign, one (zero) digit in use. */
    DIGITS(mp) = digits;
    SIGN(mp) = ZPOS;
    USED(mp) = 1;
    ALLOC(mp) = prec;

    return MP_OKAY;
}
/* end mp_init_size() */
/* }}} */
/* {{{ mp_init_copy(mp, from) */
/*
mp_init_copy(mp, from)
Initialize mp as an exact copy of from. Returns MP_OKAY if
successful, MP_MEM if memory could not be allocated for the new
structure.
*/
mp_err
mp_init_copy(mp_int *mp, const mp_int *from)
{
    mp_digit *digits;

    ARGCHK(mp != NULL && from != NULL, MP_BADARG);

    /* Copying an object onto itself is a no-op. */
    if (mp == from)
        return MP_OKAY;

    digits = s_mp_alloc(ALLOC(from), sizeof(mp_digit));
    if (digits == NULL)
        return MP_MEM;

    DIGITS(mp) = digits;
    s_mp_copy(DIGITS(from), digits, USED(from));
    USED(mp) = USED(from);
    ALLOC(mp) = ALLOC(from);
    SIGN(mp) = SIGN(from);

    return MP_OKAY;
}
/* end mp_init_copy() */
/* }}} */
/* {{{ mp_copy(from, to) */
/*
mp_copy(from, to)
Copies the mp_int 'from' to the mp_int 'to'. It is presumed that
'to' has already been initialized (if not, use mp_init_copy()
instead). If 'from' and 'to' are identical, nothing happens.
*/
mp_err
mp_copy(const mp_int *from, mp_int *to)
{
    ARGCHK(from != NULL && to != NULL, MP_BADARG);

    if (from == to)
        return MP_OKAY;

    { /* copy */
        mp_digit *tmp;

        /*
          If the allocated buffer in 'to' already has enough space to hold
          all the used digits of 'from', we'll re-use it to avoid hitting
          the memory allocater more than necessary; otherwise, we'd have
          to grow anyway, so we just allocate a hunk and make the copy as
          usual
         */
        if (ALLOC(to) >= USED(from)) {
            /* Zero the tail beyond the copied digits so no stale limbs
             * (possibly from a previous secret value) remain. */
            s_mp_setz(DIGITS(to) + USED(from), ALLOC(to) - USED(from));
            s_mp_copy(DIGITS(from), DIGITS(to), USED(from));
        } else {
            if ((tmp = s_mp_alloc(ALLOC(from), sizeof(mp_digit))) == NULL)
                return MP_MEM;

            s_mp_copy(DIGITS(from), tmp, USED(from));

            if (DIGITS(to) != NULL) {
                /* Zeroize the old buffer before freeing it. */
                s_mp_setz(DIGITS(to), ALLOC(to));
                s_mp_free(DIGITS(to));
            }

            DIGITS(to) = tmp;
            ALLOC(to) = ALLOC(from);
        }

        /* Copy the precision and sign from the original */
        USED(to) = USED(from);
        SIGN(to) = SIGN(from);
    } /* end copy */

    return MP_OKAY;
}
/* end mp_copy() */
/* }}} */
/* {{{ mp_exch(mp1, mp2) */
/*
mp_exch(mp1, mp2)
Exchange mp1 and mp2 without allocating any intermediate memory
(well, unless you count the stack space needed for this call and the
locals it creates...). This cannot fail.
*/
void
mp_exch(mp_int *mp1, mp_int *mp2)
{
#if MP_ARGCHK == 2
    assert(mp1 != NULL && mp2 != NULL);
#else
    /* In non-assert builds, silently ignore NULL arguments. */
    if (mp1 == NULL || mp2 == NULL)
        return;
#endif

    s_mp_exch(mp1, mp2);
}
/* end mp_exch() */
/* }}} */
/* {{{ mp_clear(mp) */
/*
mp_clear(mp)
Release the storage used by an mp_int, and void its fields so that
if someone calls mp_clear() again for the same int later, we won't
get clobbered.
*/
void
mp_clear(mp_int *mp)
{
    mp_digit *dp;

    if (mp == NULL)
        return;

    dp = DIGITS(mp);
    if (dp != NULL) {
        /* Zeroize the digits before releasing them; the value may be
         * sensitive.  NULL the pointer so a second mp_clear() is safe. */
        s_mp_setz(dp, ALLOC(mp));
        s_mp_free(dp);
        DIGITS(mp) = NULL;
    }

    USED(mp) = 0;
    ALLOC(mp) = 0;
}
/* end mp_clear() */
/* }}} */
/* {{{ mp_zero(mp) */
/*
mp_zero(mp)
Set mp to zero. Does not change the allocated size of the structure,
and therefore cannot fail (except on a bad argument, which we ignore)
*/
void
mp_zero(mp_int *mp)
{
    if (mp == NULL)
        return;

    /* Clear every allocated digit, then store the canonical zero:
     * one used digit, positive sign.  The allocation is unchanged. */
    s_mp_setz(DIGITS(mp), ALLOC(mp));
    USED(mp) = 1;
    SIGN(mp) = ZPOS;
}
/* end mp_zero() */
/* }}} */
/* {{{ mp_set(mp, d) */
void
mp_set(mp_int *mp, mp_digit d)
{
    /* Set mp to the single-digit value d (ignores a NULL argument). */
    if (mp != NULL) {
        mp_zero(mp);
        DIGIT(mp, 0) = d;
    }
}
/* end mp_set() */
/* }}} */
/* {{{ mp_set_int(mp, z) */
mp_err
mp_set_int(mp_int *mp, long z)
{
    /* Compute |z| without labs(): labs(LONG_MIN) overflows a long,
     * which is undefined behavior.  Negating in unsigned arithmetic is
     * well defined and yields the correct magnitude for every input. */
    unsigned long v = (z < 0) ? 0UL - (unsigned long)z : (unsigned long)z;
    mp_err res;

    ARGCHK(mp != NULL, MP_BADARG);

    /* https://bugzilla.mozilla.org/show_bug.cgi?id=1509432 */
    if ((res = mp_set_ulong(mp, v)) != MP_OKAY) { /* avoids duplicated code */
        return res;
    }

    if (z < 0) {
        SIGN(mp) = NEG;
    }

    return MP_OKAY;
}
/* end mp_set_int() */
/* }}} */
/* {{{ mp_set_ulong(mp, z) */
mp_err
mp_set_ulong(mp_int *mp, unsigned long z)
{
    int ix;
    mp_err res;

    ARGCHK(mp != NULL, MP_BADARG);

    mp_zero(mp);
    if (z == 0)
        return MP_OKAY; /* shortcut for zero */

    if (sizeof z <= sizeof(mp_digit)) {
        /* The whole value fits in a single digit. */
        DIGIT(mp, 0) = z;
        return MP_OKAY;
    }

    /* Otherwise feed the value in one byte at a time, most significant
     * byte first: mp = mp * 256 + next_byte. */
    for (ix = sizeof(long) - 1; ix >= 0; ix--) {
        if ((res = s_mp_mul_d(mp, (UCHAR_MAX + 1))) != MP_OKAY)
            return res;

        res = s_mp_add_d(mp, (mp_digit)((z >> (ix * CHAR_BIT)) & UCHAR_MAX));
        if (res != MP_OKAY)
            return res;
    }

    return MP_OKAY;
}
/* end mp_set_ulong() */
/* }}} */
/*------------------------------------------------------------------------*/
/* {{{ Digit arithmetic */
/* {{{ mp_add_d(a, d, b) */
/*
mp_add_d(a, d, b)
Compute the sum b = a + d, for a single digit d. Respects the sign of
its primary addend (single digits are unsigned anyway).
*/
mp_err
mp_add_d(const mp_int *a, mp_digit d, mp_int *b)
{
    mp_int tmp;
    mp_err res;

    ARGCHK(a != NULL && b != NULL, MP_BADARG);

    /* Work on a copy so that a == b aliasing is safe. */
    if ((res = mp_init_copy(&tmp, a)) != MP_OKAY)
        return res;

    if (SIGN(&tmp) == ZPOS) {
        /* a >= 0: plain magnitude addition. */
        if ((res = s_mp_add_d(&tmp, d)) != MP_OKAY)
            goto CLEANUP;
    } else if (s_mp_cmp_d(&tmp, d) >= 0) {
        /* a < 0 with |a| >= d: magnitude shrinks, sign stays negative. */
        if ((res = s_mp_sub_d(&tmp, d)) != MP_OKAY)
            goto CLEANUP;
    } else {
        /* a < 0 with |a| < d: result is the positive digit d - |a|.
         * |a| fits in a single digit here because |a| < d. */
        mp_neg(&tmp, &tmp);

        DIGIT(&tmp, 0) = d - DIGIT(&tmp, 0);
    }

    /* Normalize -0 to +0. */
    if (s_mp_cmp_d(&tmp, 0) == 0)
        SIGN(&tmp) = ZPOS;

    s_mp_exch(&tmp, b);

CLEANUP:
    mp_clear(&tmp);
    return res;
}
/* end mp_add_d() */
/* }}} */
/* {{{ mp_sub_d(a, d, b) */
/*
mp_sub_d(a, d, b)
Compute the difference b = a - d, for a single digit d. Respects the
sign of its subtrahend (single digits are unsigned anyway).
*/
mp_err
mp_sub_d(const mp_int *a, mp_digit d, mp_int *b)
{
    mp_int tmp;
    mp_err res;

    ARGCHK(a != NULL && b != NULL, MP_BADARG);

    /* Work on a copy so that a == b aliasing is safe. */
    if ((res = mp_init_copy(&tmp, a)) != MP_OKAY)
        return res;

    if (SIGN(&tmp) == NEG) {
        /* a < 0: subtracting d grows the magnitude. */
        if ((res = s_mp_add_d(&tmp, d)) != MP_OKAY)
            goto CLEANUP;
    } else if (s_mp_cmp_d(&tmp, d) >= 0) {
        /* a >= d >= 0: plain magnitude subtraction. */
        if ((res = s_mp_sub_d(&tmp, d)) != MP_OKAY)
            goto CLEANUP;
    } else {
        /* 0 <= a < d: result is -(d - a), a single negative digit. */
        mp_neg(&tmp, &tmp);

        DIGIT(&tmp, 0) = d - DIGIT(&tmp, 0);
        SIGN(&tmp) = NEG;
    }

    /* Normalize -0 to +0. */
    if (s_mp_cmp_d(&tmp, 0) == 0)
        SIGN(&tmp) = ZPOS;

    s_mp_exch(&tmp, b);

CLEANUP:
    mp_clear(&tmp);
    return res;
}
/* end mp_sub_d() */
/* }}} */
/* {{{ mp_mul_d(a, d, b) */
/*
mp_mul_d(a, d, b)
Compute the product b = a * d, for a single digit d. Respects the sign
of its multiplicand (single digits are unsigned anyway)
*/
mp_err
mp_mul_d(const mp_int *a, mp_digit d, mp_int *b)
{
    mp_err res;

    ARGCHK(a != NULL && b != NULL, MP_BADARG);

    /* Multiplying by zero yields zero, regardless of a. */
    if (d == 0) {
        mp_zero(b);
        return MP_OKAY;
    }

    res = mp_copy(a, b);
    if (res != MP_OKAY)
        return res;

    /* Scale the copy in place by the single digit d. */
    return s_mp_mul_d(b, d);
}
/* end mp_mul_d() */
/* }}} */
/* {{{ mp_mul_2(a, c) */
mp_err
mp_mul_2(const mp_int *a, mp_int *c)
{
    mp_err res;

    ARGCHK(a != NULL && c != NULL, MP_BADARG);

    /* Copy, then double the copy in place. */
    res = mp_copy(a, c);
    return (res != MP_OKAY) ? res : s_mp_mul_2(c);
}
/* end mp_mul_2() */
/* }}} */
/* {{{ mp_div_d(a, d, q, r) */
/*
mp_div_d(a, d, q, r)
Compute the quotient q = a / d and remainder r = a mod d, for a
single digit d. Respects the sign of its divisor (single digits are
unsigned anyway).
*/
mp_err
mp_div_d(const mp_int *a, mp_digit d, mp_int *q, mp_digit *r)
{
    mp_err res;
    mp_int qp;
    mp_digit rem = 0;
    int pow;

    ARGCHK(a != NULL, MP_BADARG);

    if (d == 0)
        return MP_RANGE;

    /* Shortcut for powers of two ... */
    if ((pow = s_mp_ispow2d(d)) >= 0) {
        mp_digit mask;

        mask = ((mp_digit)1 << pow) - 1;
        rem = DIGIT(a, 0) & mask;

        if (q) {
            if ((res = mp_copy(a, q)) != MP_OKAY) {
                return res;
            }
            s_mp_div_2d(q, pow);
        }

        if (r)
            *r = rem;

        return MP_OKAY;
    }

    if ((res = mp_init_copy(&qp, a)) != MP_OKAY)
        return res;

    res = s_mp_div_d(&qp, d, &rem);

    /* Normalize a zero quotient to positive sign.  This must operate
     * on the temporary qp — the caller is allowed to pass q == NULL,
     * so writing SIGN(q) here was a NULL-pointer dereference. */
    if (s_mp_cmp_d(&qp, 0) == 0)
        SIGN(&qp) = ZPOS;

    if (r) {
        *r = rem;
    }

    if (q)
        s_mp_exch(&qp, q);

    mp_clear(&qp);
    return res;
}
/* end mp_div_d() */
/* }}} */
/* {{{ mp_div_2(a, c) */
/*
mp_div_2(a, c)
Compute c = a / 2, disregarding the remainder.
*/
mp_err
mp_div_2(const mp_int *a, mp_int *c)
{
    mp_err res;

    ARGCHK(a != NULL && c != NULL, MP_BADARG);

    res = mp_copy(a, c);
    if (res != MP_OKAY)
        return res;

    /* Halve the copy in place; the remainder bit is discarded. */
    s_mp_div_2(c);
    return MP_OKAY;
}
/* end mp_div_2() */
/* }}} */
/* {{{ mp_expt_d(a, d, b) */
/* Compute c = a ** d for a single-digit exponent d, by iterative
 * square-and-multiply over the bits of d. */
mp_err
mp_expt_d(const mp_int *a, mp_digit d, mp_int *c)
{
    mp_int s, x;
    mp_err res;

    ARGCHK(a != NULL && c != NULL, MP_BADARG);

    if ((res = mp_init(&s)) != MP_OKAY)
        return res;
    if ((res = mp_init_copy(&x, a)) != MP_OKAY)
        goto X;

    /* s = 1 (the running product); x holds the running square of a. */
    DIGIT(&s, 0) = 1;

    while (d != 0) {
        if (d & 1) {
            if ((res = s_mp_mul(&s, &x)) != MP_OKAY)
                goto CLEANUP;
        }

        d /= 2;

        if ((res = s_mp_sqr(&x)) != MP_OKAY)
            goto CLEANUP;
    }

    s_mp_exch(&s, c);

CLEANUP:
    mp_clear(&x);
X:
    mp_clear(&s);
    return res;
}
/* end mp_expt_d() */
/* }}} */
/* }}} */
/*------------------------------------------------------------------------*/
/* {{{ Full arithmetic */
/* {{{ mp_abs(a, b) */
/*
mp_abs(a, b)
Compute b = |a|. 'a' and 'b' may be identical.
*/
mp_err
mp_abs(const mp_int *a, mp_int *b)
{
    mp_err res;

    ARGCHK(a != NULL && b != NULL, MP_BADARG);

    /* Copy, then simply force the sign positive. */
    res = mp_copy(a, b);
    if (res == MP_OKAY)
        SIGN(b) = ZPOS;

    return res;
}
/* end mp_abs() */
/* }}} */
/* {{{ mp_neg(a, b) */
/*
mp_neg(a, b)
Compute b = -a. 'a' and 'b' may be identical.
*/
mp_err
mp_neg(const mp_int *a, mp_int *b)
{
    mp_err res;

    ARGCHK(a != NULL && b != NULL, MP_BADARG);

    if ((res = mp_copy(a, b)) != MP_OKAY)
        return res;

    /* Zero is always non-negative; otherwise flip the sign. */
    if (s_mp_cmp_d(b, 0) == MP_EQ) {
        SIGN(b) = ZPOS;
    } else if (SIGN(b) == NEG) {
        SIGN(b) = ZPOS;
    } else {
        SIGN(b) = NEG;
    }

    return MP_OKAY;
}
/* end mp_neg() */
/* }}} */
/* {{{ mp_add(a, b, c) */
/*
mp_add(a, b, c)
Compute c = a + b. All parameters may be identical.
*/
mp_err
mp_add(const mp_int *a, const mp_int *b, mp_int *c)
{
    mp_err res;

    ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG);

    if (SIGN(a) == SIGN(b)) {
        /* same sign: add magnitudes, keep the common sign */
        MP_CHECKOK(s_mp_add_3arg(a, b, c));
    } else if (s_mp_cmp(a, b) >= 0) {
        /* different sign: |a| >= |b|, subtract the smaller magnitude */
        MP_CHECKOK(s_mp_sub_3arg(a, b, c));
    } else {
        /* different sign: |a| < |b| */
        MP_CHECKOK(s_mp_sub_3arg(b, a, c));
    }

    /* Normalize -0 to +0. */
    if (s_mp_cmp_d(c, 0) == MP_EQ)
        SIGN(c) = ZPOS;

CLEANUP:
    return res;
}
/* end mp_add() */
/* }}} */
/* {{{ mp_sub(a, b, c) */
/*
mp_sub(a, b, c)
Compute c = a - b. All parameters may be identical.
*/
mp_err
mp_sub(const mp_int *a, const mp_int *b, mp_int *c)
{
    mp_err res;
    int magDiff;

    ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG);

    /* a - a is exactly zero, no work needed. */
    if (a == b) {
        mp_zero(c);
        return MP_OKAY;
    }

    if (MP_SIGN(a) != MP_SIGN(b)) {
        /* Opposite signs: the magnitudes add. */
        MP_CHECKOK(s_mp_add_3arg(a, b, c));
    } else if (!(magDiff = s_mp_cmp(a, b))) {
        /* Same sign, equal magnitudes: result is zero. */
        mp_zero(c);
        res = MP_OKAY;
    } else if (magDiff > 0) {
        MP_CHECKOK(s_mp_sub_3arg(a, b, c));
    } else {
        /* |a| < |b|: compute |b| - |a| and flip the sign of a. */
        MP_CHECKOK(s_mp_sub_3arg(b, a, c));
        MP_SIGN(c) = !MP_SIGN(a);
    }

    /* Normalize -0 to +0. */
    if (s_mp_cmp_d(c, 0) == MP_EQ)
        MP_SIGN(c) = MP_ZPOS;

CLEANUP:
    return res;
}
/* end mp_sub() */
/* }}} */
/* {{{ s_mp_mulg(a, b, c) */
/*
s_mp_mulg(a, b, c)
Compute c = a * b. All parameters may be identical. if constantTime is set,
then the operations are done in constant time. The original is mostly
constant time as long as s_mpv_mul_d_add() is constant time. This is true
of the x86 assembler, as well as the current c code.
*/
mp_err
s_mp_mulg(const mp_int *a, const mp_int *b, mp_int *c, int constantTime)
{
    mp_digit *pb;
    mp_int tmp;
    mp_err res;
    mp_size ib;
    mp_size useda, usedb;

    ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG);

    /* If the output aliases an input, work from a private copy. */
    if (a == c) {
        if ((res = mp_init_copy(&tmp, a)) != MP_OKAY)
            return res;
        if (a == b)
            b = &tmp;
        a = &tmp;
    } else if (b == c) {
        if ((res = mp_init_copy(&tmp, b)) != MP_OKAY)
            return res;
        b = &tmp;
    } else {
        /* No aliasing: keep tmp empty so mp_clear() below is a no-op. */
        MP_DIGITS(&tmp) = 0;
    }

    if (MP_USED(a) < MP_USED(b)) {
        const mp_int *xch = b; /* switch a and b, to do fewer outer loops */
        b = a;
        a = xch;
    }

    MP_USED(c) = 1;
    MP_DIGIT(c, 0) = 0;
    if ((res = s_mp_pad(c, USED(a) + USED(b))) != MP_OKAY)
        goto CLEANUP;

#ifdef NSS_USE_COMBA
    /* comba isn't constant time because it clamps! If we cared
     * (we needed a constant time version of multiply that was 'faster')
     * we could easily pass constantTime down to the comba code and
     * get it to skip the clamp... but there are assembler versions
     * which add comba to platforms that can't compile the normal
     * comba's embedded assembler which would also need to change, so
     * for now we just skip comba when we are running constant time. */
    if (!constantTime && (MP_USED(a) == MP_USED(b)) && IS_POWER_OF_2(MP_USED(b))) {
        if (MP_USED(a) == 4) {
            s_mp_mul_comba_4(a, b, c);
            goto CLEANUP;
        }
        if (MP_USED(a) == 8) {
            s_mp_mul_comba_8(a, b, c);
            goto CLEANUP;
        }
        if (MP_USED(a) == 16) {
            s_mp_mul_comba_16(a, b, c);
            goto CLEANUP;
        }
        if (MP_USED(a) == 32) {
            s_mp_mul_comba_32(a, b, c);
            goto CLEANUP;
        }
    }
#endif

    pb = MP_DIGITS(b);
    /* First partial product: c = a * b[0]. */
    s_mpv_mul_d(MP_DIGITS(a), MP_USED(a), *pb++, MP_DIGITS(c));

    /* Outer loop: Digits of b */
    useda = MP_USED(a);
    usedb = MP_USED(b);
    for (ib = 1; ib < usedb; ib++) {
        mp_digit b_i = *pb++;

        /* Inner product: Digits of a.  In constant-time mode we never
         * skip a multiply, even when b_i is zero. */
        if (constantTime || b_i)
            s_mpv_mul_d_add(MP_DIGITS(a), useda, b_i, MP_DIGITS(c) + ib);
        else
            MP_DIGIT(c, ib + useda) = b_i;
    }

    /* Clamping is data-dependent, so it is skipped in constant time. */
    if (!constantTime) {
        s_mp_clamp(c);
    }

    if (SIGN(a) == SIGN(b) || s_mp_cmp_d(c, 0) == MP_EQ)
        SIGN(c) = ZPOS;
    else
        SIGN(c) = NEG;

CLEANUP:
    mp_clear(&tmp);
    return res;
}
/* end s_mp_mulg() */
/* }}} */
/* {{{ mp_mul(a, b, c) */
/*
mp_mul(a, b, c)
Compute c = a * b. All parameters may be identical.
*/
mp_err
mp_mul(const mp_int *a, const mp_int *b, mp_int *c)
{
    /* Variable-time multiply (clamps, skips zero digits of b); use
     * mp_mulCT() when timing must not depend on the operand values. */
    return s_mp_mulg(a, b, c, 0);
}
/* end mp_mul() */
/* }}} */
/* {{{ mp_mulCT(a, b, c) */
/*
mp_mulCT(a, b, c)
Compute c = a * b. In constant time. Parameters may not be identical.
NOTE: a and b may be modified.
*/
mp_err
mp_mulCT(mp_int *a, mp_int *b, mp_int *c, mp_size setSize)
{
    mp_err res;

    /* Make the multiply operands a fixed length so the multiply
     * doesn't leak their true lengths.  At this point all the values
     * are blinded, but once we finish we want the output size to be
     * hidden as well (so no clamping of the output). */
    MP_CHECKOK(s_mp_pad(a, setSize));
    MP_CHECKOK(s_mp_pad(b, setSize));
    MP_CHECKOK(s_mp_pad(c, 2 * setSize));
    MP_CHECKOK(s_mp_mulg(a, b, c, 1));

CLEANUP:
    return res;
}
/* end mp_mulCT() */
/* }}} */
/* {{{ mp_sqr(a, sqr) */
#if MP_SQUARE
/*
Computes the square of a. This can be done more
efficiently than a general multiplication, because many of the
computation steps are redundant when squaring. The inner product
step is a bit more complicated, but we save a fair number of
iterations of the multiplication loop.
*/
/* sqr = a^2; Caller provides both a and tmp; */
mp_err
mp_sqr(const mp_int *a, mp_int *sqr)
{
    mp_digit *pa;
    mp_digit d;
    mp_err res;
    mp_size ix;
    mp_int tmp;
    int count;

    ARGCHK(a != NULL && sqr != NULL, MP_BADARG);

    /* If the output aliases the input, square a private copy. */
    if (a == sqr) {
        if ((res = mp_init_copy(&tmp, a)) != MP_OKAY)
            return res;
        a = &tmp;
    } else {
        DIGITS(&tmp) = 0;
        res = MP_OKAY;
    }

    /* A square needs at most twice the digits of its root. */
    ix = 2 * MP_USED(a);
    if (ix > MP_ALLOC(sqr)) {
        MP_USED(sqr) = 1;
        MP_CHECKOK(s_mp_grow(sqr, ix));
    }
    MP_USED(sqr) = ix;
    MP_DIGIT(sqr, 0) = 0;

#ifdef NSS_USE_COMBA
    if (IS_POWER_OF_2(MP_USED(a))) {
        if (MP_USED(a) == 4) {
            s_mp_sqr_comba_4(a, sqr);
            goto CLEANUP;
        }
        if (MP_USED(a) == 8) {
            s_mp_sqr_comba_8(a, sqr);
            goto CLEANUP;
        }
        if (MP_USED(a) == 16) {
            s_mp_sqr_comba_16(a, sqr);
            goto CLEANUP;
        }
        if (MP_USED(a) == 32) {
            s_mp_sqr_comba_32(a, sqr);
            goto CLEANUP;
        }
    }
#endif

    pa = MP_DIGITS(a);
    count = MP_USED(a) - 1;
    if (count > 0) {
        /* Accumulate the cross products a[i]*a[j] for i < j; the count
         * shrinks each pass since pairs below the diagonal repeat. */
        d = *pa++;
        s_mpv_mul_d(pa, count, d, MP_DIGITS(sqr) + 1);
        for (ix = 3; --count > 0; ix += 2) {
            d = *pa++;
            s_mpv_mul_d_add(pa, count, d, MP_DIGITS(sqr) + ix);
        } /* for(ix ...) */
        MP_DIGIT(sqr, MP_USED(sqr) - 1) = 0; /* above loop stopped short of this. */

        /* now sqr *= 2 */
        s_mp_mul_2(sqr);
    } else {
        MP_DIGIT(sqr, 1) = 0;
    }

    /* now add the squares of the digits of a to sqr. */
    s_mpv_sqr_add_prop(MP_DIGITS(a), MP_USED(a), MP_DIGITS(sqr));

    /* A square is never negative. */
    SIGN(sqr) = ZPOS;
    s_mp_clamp(sqr);

CLEANUP:
    mp_clear(&tmp);
    return res;
}
/* end mp_sqr() */
#endif
/* }}} */
/* {{{ mp_div(a, b, q, r) */
/*
mp_div(a, b, q, r)
Compute q = a / b and r = a mod b. Input parameters may be re-used
as output parameters. If q or r is NULL, that portion of the
computation will be discarded (although it will still be computed)
*/
mp_err
mp_div(const mp_int *a, const mp_int *b, mp_int *q, mp_int *r)
{
    mp_err res;
    mp_int *pQ, *pR;
    mp_int qtmp, rtmp, btmp;
    int cmp;
    mp_sign signA;
    mp_sign signB;

    ARGCHK(a != NULL && b != NULL, MP_BADARG);

    /* Capture the input signs before any output may alias them. */
    signA = MP_SIGN(a);
    signB = MP_SIGN(b);

    if (mp_cmp_z(b) == MP_EQ)
        return MP_RANGE;

    /* Keep the temporaries empty so CLEANUP can clear them safely. */
    DIGITS(&qtmp) = 0;
    DIGITS(&rtmp) = 0;
    DIGITS(&btmp) = 0;

    /* Set up some temporaries...  Work in rtmp/qtmp whenever the
     * caller's output is NULL or aliases an input. */
    if (!r || r == a || r == b) {
        MP_CHECKOK(mp_init_copy(&rtmp, a));
        pR = &rtmp;
    } else {
        MP_CHECKOK(mp_copy(a, r));
        pR = r;
    }

    if (!q || q == a || q == b) {
        MP_CHECKOK(mp_init_size(&qtmp, MP_USED(a)));
        pQ = &qtmp;
    } else {
        MP_CHECKOK(s_mp_pad(q, MP_USED(a)));
        pQ = q;
        mp_zero(pQ);
    }

    /*
      If |a| <= |b|, we can compute the solution without division;
      otherwise, we actually do the work required.
     */
    if ((cmp = s_mp_cmp(a, b)) <= 0) {
        if (cmp) {
            /* r was set to a above. */
            mp_zero(pQ);
        } else {
            /* |a| == |b|: quotient is 1, remainder is 0. */
            mp_set(pQ, 1);
            mp_zero(pR);
        }
    } else {
        MP_CHECKOK(mp_init_copy(&btmp, b));
        MP_CHECKOK(s_mp_div(pR, &btmp, pQ));
    }

    /* Compute the signs for the output */
    MP_SIGN(pR) = signA; /* Sr = Sa */
    /* Sq = ZPOS if Sa == Sb */ /* Sq = NEG if Sa != Sb */
    MP_SIGN(pQ) = (signA == signB) ? ZPOS : NEG;

    /* Normalize -0 to +0 for both outputs. */
    if (s_mp_cmp_d(pQ, 0) == MP_EQ)
        SIGN(pQ) = ZPOS;
    if (s_mp_cmp_d(pR, 0) == MP_EQ)
        SIGN(pR) = ZPOS;

    /* Copy output, if it is needed */
    if (q && q != pQ)
        s_mp_exch(pQ, q);

    if (r && r != pR)
        s_mp_exch(pR, r);

CLEANUP:
    mp_clear(&btmp);
    mp_clear(&rtmp);
    mp_clear(&qtmp);

    return res;
}
/* end mp_div() */
/* }}} */
/* {{{ mp_div_2d(a, d, q, r) */
/* Compute q = a / 2^d and r = a mod 2^d; either output may be NULL. */
mp_err
mp_div_2d(const mp_int *a, mp_digit d, mp_int *q, mp_int *r)
{
    mp_err res;

    ARGCHK(a != NULL, MP_BADARG);

    /* Both outputs are seeded from a before either is modified, so the
     * function is correct even when q or r aliases a. */
    if (q) {
        if ((res = mp_copy(a, q)) != MP_OKAY)
            return res;
    }
    if (r) {
        if ((res = mp_copy(a, r)) != MP_OKAY)
            return res;
    }
    if (q) {
        s_mp_div_2d(q, d); /* q = a >> d */
    }
    if (r) {
        s_mp_mod_2d(r, d); /* r = low d bits of a */
    }

    return MP_OKAY;
}
/* end mp_div_2d() */
/* }}} */
/* {{{ mp_expt(a, b, c) */
/*
mp_expt(a, b, c)
Compute c = a ** b, that is, raise a to the b power. Uses a
standard iterative square-and-multiply technique.
*/
mp_err
mp_expt(mp_int *a, mp_int *b, mp_int *c)
{
    mp_int s, x;
    mp_err res;
    mp_digit d;
    unsigned int dig, bit;

    ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG);

    if (mp_cmp_z(b) < 0)
        return MP_RANGE;

    if ((res = mp_init(&s)) != MP_OKAY)
        return res;

    mp_set(&s, 1);

    if ((res = mp_init_copy(&x, a)) != MP_OKAY)
        goto X;

    /* Loop over low-order digits in ascending order */
    for (dig = 0; dig < (USED(b) - 1); dig++) {
        d = DIGIT(b, dig);

        /* Loop over bits of each non-maximal digit */
        for (bit = 0; bit < DIGIT_BIT; bit++) {
            if (d & 1) {
                if ((res = s_mp_mul(&s, &x)) != MP_OKAY)
                    goto CLEANUP;
            }

            d >>= 1;

            if ((res = s_mp_sqr(&x)) != MP_OKAY)
                goto CLEANUP;
        }
    }

    /* Consider now the last digit... */
    d = DIGIT(b, dig);

    while (d) {
        if (d & 1) {
            if ((res = s_mp_mul(&s, &x)) != MP_OKAY)
                goto CLEANUP;
        }

        d >>= 1;

        if ((res = s_mp_sqr(&x)) != MP_OKAY)
            goto CLEANUP;
    }

    /* An even power is never negative, so force the canonical positive
     * sign.  (For odd exponents the sign of a already propagated through
     * the first multiply above.)  The previous code assigned SIGN(a)
     * here, which wrongly made (-a)^even come out negative. */
    if (mp_iseven(b))
        SIGN(&s) = ZPOS;

    res = mp_copy(&s, c);

CLEANUP:
    mp_clear(&x);
X:
    mp_clear(&s);
    return res;
}
/* end mp_expt() */
/* }}} */
/* {{{ mp_2expt(a, k) */
/* Compute a = 2^k */
mp_err
mp_2expt(mp_int *a, mp_digit k)
{
    ARGCHK(a != NULL, MP_BADARG);

    /* Thin argument-checked wrapper around the internal helper. */
    return s_mp_2expt(a, k);
}
/* end mp_2expt() */
/* }}} */
/* {{{ mp_mod(a, m, c) */
/*
mp_mod(a, m, c)
Compute c = a (mod m). Result will always be 0 <= c < m.
*/
mp_err
mp_mod(const mp_int *a, const mp_int *m, mp_int *c)
{
    mp_err res;
    int mag;

    ARGCHK(a != NULL && m != NULL && c != NULL, MP_BADARG);

    /* Only a positive modulus is supported. */
    if (SIGN(m) == NEG)
        return MP_RANGE;

    /*
      If |a| > m, we need to divide to get the remainder and take the
      absolute value.
      If |a| < m, we don't need to do any division, just copy and adjust
      the sign (if a is negative).
      If |a| == m, we can simply set the result to zero.
      This order is intended to minimize the average path length of the
      comparison chain on common workloads -- the most frequent cases are
      that |a| != m, so we do those first.
     */
    if ((mag = s_mp_cmp(a, m)) > 0) {
        if ((res = mp_div(a, m, NULL, c)) != MP_OKAY)
            return res;

        /* A negative remainder is folded into the range [0, m). */
        if (SIGN(c) == NEG) {
            if ((res = mp_add(c, m, c)) != MP_OKAY)
                return res;
        }
    } else if (mag < 0) {
        if ((res = mp_copy(a, c)) != MP_OKAY)
            return res;

        if (mp_cmp_z(a) < 0) {
            if ((res = mp_add(c, m, c)) != MP_OKAY)
                return res;
        }
    } else {
        mp_zero(c);
    }

    return MP_OKAY;
}
/* end mp_mod() */
/* }}} */
/* {{{ s_mp_subCT_d(a, b, borrow, c) */
/*
s_mp_subCT_d(a, b, borrow, c)
Compute c = (a -b) - subtract in constant time. returns borrow
*/
mp_digit
s_mp_subCT_d(mp_digit a, mp_digit b, mp_digit borrow, mp_digit *ret)
{
    *ret = a - b - borrow;
    /* Borrow out when the subtraction wrapped (result exceeds a), or
     * when it cancelled exactly while consuming an incoming borrow.
     * MP_CT_LTU / MP_CT_EQ are branch-free constant-time predicates. */
    return MP_CT_LTU(a, *ret) | (MP_CT_EQ(a, *ret) & borrow);
}
/* s_mp_subCT_d() */
/* }}} */
/* {{{ mp_subCT(a, b, ret, borrow) */
/* return ret= a - b and borrow in borrow. done in constant time.
* b could be modified.
*/
mp_err
mp_subCT(const mp_int *a, mp_int *b, mp_int *ret, mp_digit *borrow)
{
    mp_size used_a = MP_USED(a);
    mp_size i;
    mp_err res;

    /* Pad b and ret to a's length so the digit loop always touches a
     * fixed number of limbs, independent of the values involved. */
    MP_CHECKOK(s_mp_pad(b, used_a));
    MP_CHECKOK(s_mp_pad(ret, used_a));

    *borrow = 0;
    /* Ripple the borrow through every digit, branch-free. */
    for (i = 0; i < used_a; i++) {
        *borrow = s_mp_subCT_d(MP_DIGIT(a, i), MP_DIGIT(b, i), *borrow,
                               &MP_DIGIT(ret, i));
    }
    res = MP_OKAY;
CLEANUP:
    return res;
}
/* }}} */
/* {{{ mp_selectCT(cond, a, b, ret) */
/*
* return ret= cond ? a : b; cond should be either 0 or 1
*/
mp_err
mp_selectCT(mp_digit cond,
            const mp_int *a,
            const mp_int *b, mp_int *ret)
{
    mp_size used_a = MP_USED(a);
    mp_err res;
    mp_size i;

    /* Expand cond (0 or 1) into an all-zeros / all-ones digit mask. */
    cond *= MP_DIGIT_MAX;

    /* we currently require these to be equal on input,
     * we could use pad to extend one of them, but that might
     * leak data as it wouldn't be constant time */
    if (used_a != MP_USED(b)) {
        return MP_BADARG;
    }
    MP_CHECKOK(s_mp_pad(ret, used_a));

    /* Digit-wise branch-free select. */
    for (i = 0; i < used_a; i++) {
        MP_DIGIT(ret, i) = MP_CT_SEL_DIGIT(cond, MP_DIGIT(a, i), MP_DIGIT(b, i));
    }
    res = MP_OKAY;
CLEANUP:
    return res;
}
/* {{{ mp_reduceCT(a, m, c) */
/*
mp_reduceCT(a, m, c)
Compute c = aR^-1 (mod m) in constant time.
The input should be in Montgomery form. If the input is the
result of a Montgomery multiply, then the output will also be
in Montgomery form.
The result will be reduced to MP_USED(m), but not
clamped.
*/
mp_err
mp_reduceCT(const mp_int *a, const mp_int *m, mp_digit n0i, mp_int *c)
{
    mp_size used_m = MP_USED(m);
    mp_size used_c = used_m * 2 + 1;
    mp_digit *m_digits, *c_digits;
    mp_size i;
    mp_digit borrow, carry;
    mp_err res;
    mp_int sub;

    MP_DIGITS(&sub) = 0;
    MP_CHECKOK(mp_init_size(&sub, used_m));

    if (a != c) {
        MP_CHECKOK(mp_copy(a, c));
    }
    MP_CHECKOK(s_mp_pad(c, used_c));
    m_digits = MP_DIGITS(m);
    c_digits = MP_DIGITS(c);
    /* Montgomery reduction: for each low digit, add the multiple of m
     * (scaled by n0i) that zeroes that digit of c. */
    for (i = 0; i < used_m; i++) {
        mp_digit m_i = MP_DIGIT(c, i) * n0i;
        s_mpv_mul_d_add_propCT(m_digits, used_m, m_i, c_digits++, used_c--);
    }
    /* Discard the used_m low digits that were just zeroed. */
    s_mp_rshd(c, used_m);
    /* MP_USED(c) should be used_m+1 with the high word being any carry
     * from the previous multiply; save that carry and drop the high
     * word for the subtraction below. */
    carry = MP_DIGIT(c, used_m);
    MP_DIGIT(c, used_m) = 0;
    MP_USED(c) = used_m;
    /* mp_subCT wants c and m to be the same size; we've already
     * guaranteed that in the previous statement, so mp_subCT won't
     * actually modify m, and it's safe to cast away const. */
    MP_CHECKOK(mp_subCT(c, (mp_int *)m, &sub, &borrow));
    /* we return c-m if c >= m (no borrow), or if there was both a
     * borrow and a carry */
    MP_CHECKOK(mp_selectCT(borrow ^ carry, c, &sub, c));
    res = MP_OKAY;

CLEANUP:
    mp_clear(&sub);
    return res;
}
/* }}} */
/* {{{ mp_mod_d(a, d, c) */
/*
mp_mod_d(a, d, c)
Compute c = a (mod d). Result will always be 0 <= c < d
*/
mp_err
mp_mod_d(const mp_int *a, mp_digit d, mp_digit *c)
{
    mp_err res;
    mp_digit rem;

    ARGCHK(a != NULL && c != NULL, MP_BADARG);

    if (s_mp_cmp_d(a, d) > 0) {
        /* |a| > d: do the full single-digit division for the remainder. */
        if ((res = mp_div_d(a, d, NULL, &rem)) != MP_OKAY)
            return res;
    } else {
        /* |a| <= d: the remainder is a's low digit, folded back into
         * [0, d) when a is negative. */
        if (SIGN(a) == NEG)
            rem = d - DIGIT(a, 0);
        else
            rem = DIGIT(a, 0);
    }

    if (c)
        *c = rem;

    return MP_OKAY;
}
/* }}} */
/* }}} */
/*------------------------------------------------------------------------*/
/* {{{ Modular arithmetic */
#if MP_MODARITH
/* {{{ mp_addmod(a, b, m, c) */
/*
mp_addmod(a, b, m, c)
Compute c = (a + b) mod m
*/
mp_err
mp_addmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c)
{
    mp_err res;

    ARGCHK(a != NULL && b != NULL && m != NULL && c != NULL, MP_BADARG);

    /* Add, then reduce the sum into the range [0, m). */
    if ((res = mp_add(a, b, c)) != MP_OKAY)
        return res;

    return mp_mod(c, m, c);
}
/* }}} */
/* {{{ mp_submod(a, b, m, c) */
/*
mp_submod(a, b, m, c)
Compute c = (a - b) mod m
*/
mp_err
mp_submod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c)
{
    mp_err res;

    ARGCHK(a != NULL && b != NULL && m != NULL && c != NULL, MP_BADARG);

    /* Subtract, then reduce the difference into the range [0, m). */
    if ((res = mp_sub(a, b, c)) != MP_OKAY)
        return res;

    return mp_mod(c, m, c);
}
/* }}} */
/* {{{ mp_mulmod(a, b, m, c) */
/*
mp_mulmod(a, b, m, c)
Compute c = (a * b) mod m
*/
mp_err
mp_mulmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c)
{
    mp_err res;

    ARGCHK(a != NULL && b != NULL && m != NULL && c != NULL, MP_BADARG);

    /* Multiply, then reduce the product into the range [0, m). */
    if ((res = mp_mul(a, b, c)) != MP_OKAY)
        return res;

    return mp_mod(c, m, c);
}
/* }}} */
/* {{{ mp_mulmontmodCT(a, b, m, c) */
/*
mp_mulmontmodCT(a, b, m, c)
Compute c = (a * b) mod m in constant time wrt a and b. either a or b
should be in montgomery form and the output is native. If both a and b
are in montgomery form, then the output will also be in montgomery form
and can be recovered with an mp_reduceCT call.
NOTE: a and b may be modified.
*/
mp_err
mp_mulmontmodCT(mp_int *a, mp_int *b, const mp_int *m, mp_digit n0i, mp_int *c)
{
    mp_err res;

    ARGCHK(a != NULL && b != NULL && m != NULL && c != NULL, MP_BADARG);

    /* Constant-time multiply, followed by Montgomery reduction. */
    if ((res = mp_mulCT(a, b, c, MP_USED(m))) != MP_OKAY)
        return res;

    return mp_reduceCT(c, m, n0i, c);
}
/* }}} */
/* {{{ mp_sqrmod(a, m, c) */
#if MP_SQUARE
mp_err
mp_sqrmod(const mp_int *a, const mp_int *m, mp_int *c)
{
    mp_err res;

    ARGCHK(a != NULL && m != NULL && c != NULL, MP_BADARG);

    /* Square, then reduce the result into the range [0, m). */
    if ((res = mp_sqr(a, c)) != MP_OKAY)
        return res;

    return mp_mod(c, m, c);
}
/* end mp_sqrmod() */
#endif
/* }}} */
/* {{{ s_mp_exptmod(a, b, m, c) */
/*
s_mp_exptmod(a, b, m, c)
Compute c = (a ** b) mod m. Uses a standard square-and-multiply
method with modular reductions at each step. (This is basically the
same code as mp_expt(), except for the addition of the reductions)
The modular reductions are done using Barrett's algorithm (see
s_mp_reduce() below for details)
*/
mp_err
s_mp_exptmod(const mp_int *a, const mp_int *b, const mp_int *m, mp_int *c)
{
    mp_int s, x, mu;
    mp_err res;
    mp_digit d;
    unsigned int dig, bit;

    ARGCHK(a != NULL && b != NULL && c != NULL && m != NULL, MP_BADARG);

    if (mp_cmp_z(b) < 0 || mp_cmp_z(m) <= 0)
        return MP_RANGE;

    if ((res = mp_init(&s)) != MP_OKAY)
        return res;
    /* x = a mod m is the running square-and-multiply base. */
    if ((res = mp_init_copy(&x, a)) != MP_OKAY ||
        (res = mp_mod(&x, m, &x)) != MP_OKAY)
        goto X;
    if ((res = mp_init(&mu)) != MP_OKAY)
        goto MU;

    mp_set(&s, 1);

    /* mu = b^2k / m — the precomputed Barrett reduction constant. */
    if ((res = s_mp_add_d(&mu, 1)) != MP_OKAY)
        goto CLEANUP;
    if ((res = s_mp_lshd(&mu, 2 * USED(m))) != MP_OKAY)
        goto CLEANUP;
    if ((res = mp_div(&mu, m, &mu, NULL)) != MP_OKAY)
        goto CLEANUP;

    /* Loop over digits of b in ascending order, except highest order */
    for (dig = 0; dig < (USED(b) - 1); dig++) {
        d = DIGIT(b, dig);

        /* Loop over the bits of the lower-order digits */
        for (bit = 0; bit < DIGIT_BIT; bit++) {
            if (d & 1) {
                if ((res = s_mp_mul(&s, &x)) != MP_OKAY)
                    goto CLEANUP;
                if ((res = s_mp_reduce(&s, m, &mu)) != MP_OKAY)
                    goto CLEANUP;
            }

            d >>= 1;

            if ((res = s_mp_sqr(&x)) != MP_OKAY)
                goto CLEANUP;
            if ((res = s_mp_reduce(&x, m, &mu)) != MP_OKAY)
                goto CLEANUP;
        }
    }

    /* Now do the last digit... */
    d = DIGIT(b, dig);

    while (d) {
        if (d & 1) {
            if ((res = s_mp_mul(&s, &x)) != MP_OKAY)
                goto CLEANUP;
            if ((res = s_mp_reduce(&s, m, &mu)) != MP_OKAY)
                goto CLEANUP;
        }

        d >>= 1;

        if ((res = s_mp_sqr(&x)) != MP_OKAY)
            goto CLEANUP;
        if ((res = s_mp_reduce(&x, m, &mu)) != MP_OKAY)
            goto CLEANUP;
    }

    s_mp_exch(&s, c);

CLEANUP:
    mp_clear(&mu);
MU:
    mp_clear(&x);
X:
    mp_clear(&s);

    return res;
}
/* end s_mp_exptmod() */
/* }}} */
/* {{{ mp_exptmod_d(a, d, m, c) */
/* Compute c = (a ** d) mod m for a single-digit exponent, using
 * square-and-multiply with a modular reduction after every step. */
mp_err
mp_exptmod_d(const mp_int *a, mp_digit d, const mp_int *m, mp_int *c)
{
    mp_int s, x;
    mp_err res;

    ARGCHK(a != NULL && c != NULL && m != NULL, MP_BADARG);

    if ((res = mp_init(&s)) != MP_OKAY)
        return res;
    if ((res = mp_init_copy(&x, a)) != MP_OKAY)
        goto X;

    mp_set(&s, 1);

    while (d != 0) {
        if (d & 1) {
            if ((res = s_mp_mul(&s, &x)) != MP_OKAY ||
                (res = mp_mod(&s, m, &s)) != MP_OKAY)
                goto CLEANUP;
        }

        d /= 2;

        if ((res = s_mp_sqr(&x)) != MP_OKAY ||
            (res = mp_mod(&x, m, &x)) != MP_OKAY)
            goto CLEANUP;
    }

    s_mp_exch(&s, c);

CLEANUP:
    mp_clear(&x);
X:
    mp_clear(&s);
    return res;
}
/* end mp_exptmod_d() */
/* }}} */
#endif /* if MP_MODARITH */
/* }}} */
/*------------------------------------------------------------------------*/
/* {{{ Comparison functions */
/* {{{ mp_cmp_z(a) */
/*
mp_cmp_z(a)
Compare a <=> 0. Returns <0 if a<0, 0 if a=0, >0 if a>0.
*/
int
mp_cmp_z(const mp_int *a)
{
    ARGMPCHK(a != NULL);

    /* Negative sign => below zero; a single zero digit => equal to
     * zero; anything else is positive. */
    if (SIGN(a) == NEG)
        return MP_LT;
    if (USED(a) == 1 && DIGIT(a, 0) == 0)
        return MP_EQ;
    return MP_GT;
}
/* end mp_cmp_z() */
/* }}} */
/* {{{ mp_cmp_d(a, d) */
/*
mp_cmp_d(a, d)
Compare a <=> d. Returns <0 if a<d, 0 if a=d, >0 if a>d
*/
int
mp_cmp_d(const mp_int *a, mp_digit d)
{
    ARGCHK(a != NULL, MP_EQ);

    /* A negative value is smaller than any (unsigned) digit. */
    return (SIGN(a) == NEG) ? MP_LT : s_mp_cmp_d(a, d);
}
/* end mp_cmp_d() */
/* }}} */
/* {{{ mp_cmp(a, b) */
int
mp_cmp(const mp_int *a, const mp_int *b)
{
    int mag;

    ARGCHK(a != NULL && b != NULL, MP_EQ);

    /* Opposite signs: the non-negative operand is the larger one. */
    if (SIGN(a) != SIGN(b))
        return (SIGN(a) == ZPOS) ? MP_GT : MP_LT;

    /* Same sign: compare magnitudes, flipping the result when both
     * operands are negative. */
    mag = s_mp_cmp(a, b);
    if (mag == MP_EQ)
        return MP_EQ;

    return (SIGN(a) == ZPOS) ? mag : -mag;
}
/* end mp_cmp() */
/* }}} */
/* {{{ mp_cmp_mag(a, b) */
/*
mp_cmp_mag(a, b)
Compares |a| <=> |b|, and returns an appropriate comparison result
*/
int
mp_cmp_mag(const mp_int *a, const mp_int *b)
{
    ARGCHK(a != NULL && b != NULL, MP_EQ);

    /* Signs are ignored; the magnitude helper does the work. */
    return s_mp_cmp(a, b);
}
/* end mp_cmp_mag() */
/* }}} */
/* {{{ mp_isodd(a) */
/*
mp_isodd(a)
Returns a true (non-zero) value if a is odd, false (zero) otherwise.
*/
int
mp_isodd(const mp_int *a)
{
    ARGMPCHK(a != NULL);

    /* Parity is determined entirely by the least significant digit. */
    return (int)(DIGIT(a, 0) & 1);
}
/* end mp_isodd() */
/* }}} */
/* {{{ mp_iseven(a) */
/* Returns non-zero iff a is even; the logical complement of mp_isodd(). */
int
mp_iseven(const mp_int *a)
{
    return mp_isodd(a) ? 0 : 1;
}
/* end mp_iseven() */
/* }}} */
/* }}} */
/*------------------------------------------------------------------------*/
/* {{{ Number theoretic functions */
/* {{{ mp_gcd(a, b, c) */
/*
Computes the GCD using the constant-time algorithm
by Bernstein and Yang (https://eprint.iacr.org/2019/266)
"Fast constant-time gcd computation and modular inversion"
*/
mp_err
mp_gcd(mp_int *a, mp_int *b, mp_int *c)
{
mp_err res;
mp_digit cond = 0, mask = 0;
mp_int g, temp, f;
int i, j, m, bit = 1, delta = 1, shifts = 0, last = -1;
mp_size top, flen, glen;
mp_int *clear[3];
ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG);
/*
Early exit if either of the inputs is zero.
Caller is responsible for the proper handling of inputs.
*/
if (mp_cmp_z(a) == MP_EQ) {
/* gcd(0, b) = |b| */
res = mp_copy(b, c);
SIGN(c) = ZPOS;
return res;
}
else if (mp_cmp_z(b) == MP_EQ) {
/* gcd(a, 0) = |a| */
res = mp_copy(a, c);
SIGN(c) = ZPOS;
return res;
}
/* Register every initialized temporary so CLEANUP can release
   exactly the ones that were successfully created. */
MP_CHECKOK(mp_init(&temp));
clear[++last] = &temp;
MP_CHECKOK(mp_init_copy(&g, a));
clear[++last] = &g;
MP_CHECKOK(mp_init_copy(&f, b));
clear[++last] = &f;
/*
For even case compute the number of
shared powers of 2 in f and g.
*/
/* 'bit' remains 1 only while every bit position scanned so far is
   zero in both f and g, so 'shifts' accumulates the count of common
   trailing zero bits without any data-dependent branches. */
for (i = 0; i < USED(&f) && i < USED(&g); i++) {
mask = ~(DIGIT(&f, i) | DIGIT(&g, i));
for (j = 0; j < MP_DIGIT_BIT; j++) {
bit &= mask;
shifts += bit;
mask >>= 1;
}
}
/* Reduce to the odd case by removing the powers of 2. */
s_mp_div_2d(&f, shifts);
s_mp_div_2d(&g, shifts);
/* Allocate to the size of largest mp_int. */
top = (mp_size)1 + ((USED(&f) >= USED(&g)) ? USED(&f) : USED(&g));
MP_CHECKOK(s_mp_grow(&f, top));
MP_CHECKOK(s_mp_grow(&g, top));
MP_CHECKOK(s_mp_grow(&temp, top));
/* Make sure f contains the odd value. */
MP_CHECKOK(mp_cswap((~DIGIT(&f, 0) & 1), &f, &g, top));
/* Upper bound for the total iterations. */
flen = mpl_significant_bits(&f);
glen = mpl_significant_bits(&g);
m = 4 + 3 * ((flen >= glen) ? flen : glen);
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4146)
// Thanks MSVC, we know what we're negating an unsigned mp_digit
#endif
/* Fixed iteration count: every pass performs the same operations
   regardless of the data, selecting results with mp_cswap masks. */
for (i = 0; i < m; i++) {
/* Step 1: conditional swap. */
/* Set cond if delta > 0 and g is odd. */
cond = (-delta >> (8 *
sizeof(delta) - 1)) & DIGIT(&g, 0) & 1;
/* If cond is set replace (delta,f) with (-delta,-f). */
delta = (-cond & -delta) | ((cond - 1) & delta);
SIGN(&f) ^= cond;
/* If cond is set swap f with g. */
MP_CHECKOK(mp_cswap(cond, &f, &g, top));
/* Step 2: elimination. */
/* Update delta. */
delta++;
/* If g is odd, right shift (g+f) else right shift g. */
MP_CHECKOK(mp_add(&g, &f, &temp));
MP_CHECKOK(mp_cswap((DIGIT(&g, 0) & 1), &g, &temp, top));
s_mp_div_2(&g);
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
/* GCD is in f, take the absolute value. */
SIGN(&f) = ZPOS;
/* Add back the removed powers of 2. */
MP_CHECKOK(s_mp_mul_2d(&f, shifts));
MP_CHECKOK(mp_copy(&f, c));
CLEANUP:
while (last >= 0)
mp_clear(clear[last--]);
return res;
}
/* end mp_gcd() */
/* }}} */
/* {{{ mp_lcm(a, b, c) */
/* We compute the least common multiple using the rule:
ab = [a, b](a, b)
... by computing the product, and dividing out the gcd.
*/
mp_err
mp_lcm(mp_int *a, mp_int *b, mp_int *c)
{
    mp_int gcd, prod;
    mp_err res;

    ARGCHK(a != NULL && b != NULL && c != NULL, MP_BADARG);

    /* Temporaries for the gcd and the full product. */
    if ((res = mp_init(&gcd)) != MP_OKAY)
        return res;
    if ((res = mp_init(&prod)) != MP_OKAY)
        goto GCD;

    /* lcm(a, b) = (a * b) / gcd(a, b); the division is exact. */
    if ((res = mp_mul(a, b, &prod)) == MP_OKAY &&
        (res = mp_gcd(a, b, &gcd)) == MP_OKAY)
        res = mp_div(&prod, &gcd, c, NULL);

    mp_clear(&prod);
GCD:
    mp_clear(&gcd);
    return res;
}
/* end mp_lcm() */
/* }}} */
/* {{{ mp_xgcd(a, b, g, x, y) */
/*
mp_xgcd(a, b, g, x, y)
Compute g = (a, b) and values x and y satisfying Bezout's identity
(that is, ax + by = g). This uses the binary extended GCD algorithm
based on the Stein algorithm used for mp_gcd()
See algorithm 14.61 in Handbook of Applied Cryptography.
*/
mp_err
mp_xgcd(
const mp_int *a,
const mp_int *b, mp_int *g, mp_int *x, mp_int *y)
{
mp_int gx, xc, yc, u, v, A, B, C, D;
mp_int *clean[9];
mp_err res;
int last = -1;
/* The algorithm needs a non-zero b to make progress. */
if (mp_cmp_z(b) == 0)
return MP_RANGE;
/* Initialize all these variables we need */
MP_CHECKOK(mp_init(&u));
clean[++last] = &u;
MP_CHECKOK(mp_init(&v));
clean[++last] = &v;
MP_CHECKOK(mp_init(&gx));
clean[++last] = &gx;
MP_CHECKOK(mp_init(&A));
clean[++last] = &A;
MP_CHECKOK(mp_init(&B));
clean[++last] = &B;
MP_CHECKOK(mp_init(&C));
clean[++last] = &C;
MP_CHECKOK(mp_init(&D));
clean[++last] = &D;
/* Work on the absolute values of the inputs. */
MP_CHECKOK(mp_init_copy(&xc, a));
clean[++last] = &xc;
mp_abs(&xc, &xc);
MP_CHECKOK(mp_init_copy(&yc, b));
clean[++last] = &yc;
mp_abs(&yc, &yc);
/* gx collects the common powers of 2 stripped below. */
mp_set(&gx, 1);
/* Divide by two until at least one of them is odd */
while (mp_iseven(&xc) && mp_iseven(&yc)) {
mp_size nx = mp_trailing_zeros(&xc);
mp_size ny = mp_trailing_zeros(&yc);
mp_size n = MP_MIN(nx, ny);
s_mp_div_2d(&xc, n);
s_mp_div_2d(&yc, n);
MP_CHECKOK(s_mp_mul_2d(&gx, n));
}
MP_CHECKOK(mp_copy(&xc, &u));
MP_CHECKOK(mp_copy(&yc, &v));
/* Per HAC alg. 14.61 the loop maintains the invariants
   A*xc + B*yc = u and C*xc + D*yc = v. */
mp_set(&A, 1);
mp_set(&D, 1);
/* Loop through binary GCD algorithm */
do {
/* Halve u while even, keeping A,B consistent with the invariant. */
while (mp_iseven(&u)) {
s_mp_div_2(&u);
if (mp_iseven(&A) && mp_iseven(&B)) {
s_mp_div_2(&A);
s_mp_div_2(&B);
}
else {
/* A + yc and B - xc preserve A*xc + B*yc (mod nothing:
   the added terms cancel) and make both even. */
MP_CHECKOK(mp_add(&A, &yc, &A));
s_mp_div_2(&A);
MP_CHECKOK(mp_sub(&B, &xc, &B));
s_mp_div_2(&B);
}
}
/* Same treatment for v with its coefficients C, D. */
while (mp_iseven(&v)) {
s_mp_div_2(&v);
if (mp_iseven(&C) && mp_iseven(&D)) {
s_mp_div_2(&C);
s_mp_div_2(&D);
}
else {
MP_CHECKOK(mp_add(&C, &yc, &C));
s_mp_div_2(&C);
MP_CHECKOK(mp_sub(&D, &xc, &D));
s_mp_div_2(&D);
}
}
/* Subtract the smaller from the larger, updating coefficients. */
if (mp_cmp(&u, &v) >= 0) {
MP_CHECKOK(mp_sub(&u, &v, &u));
MP_CHECKOK(mp_sub(&A, &C, &A));
MP_CHECKOK(mp_sub(&B, &D, &B));
}
else {
MP_CHECKOK(mp_sub(&v, &u, &v));
MP_CHECKOK(mp_sub(&C, &A, &C));
MP_CHECKOK(mp_sub(&D, &B, &D));
}
}
while (mp_cmp_z(&u) != 0);
/* copy results to output */
if (x)
MP_CHECKOK(mp_copy(&C, x));
if (y)
MP_CHECKOK(mp_copy(&D, y));
if (g)
MP_CHECKOK(mp_mul(&gx, &v, g));
CLEANUP:
while (last >= 0)
mp_clear(clean[last--]);
return res;
}
/* end mp_xgcd() */
/* }}} */
/*
  mp_trailing_zeros(mp)

  Count the consecutive zero bits at the low-order end of mp.
  Returns 0 for a NULL, uninitialized, or zero-valued argument.
*/
mp_size
mp_trailing_zeros(const mp_int *mp)
{
    mp_digit d = 0; /* initialized in case the loop below never runs */
    mp_size n = 0;
    unsigned int ix;

    if (!mp || !MP_DIGITS(mp) || !mp_cmp_z(mp))
        return n;

    /* Skip whole zero digits.  The bounds test comes first so we
       never read the digit at index MP_USED(mp), even when handed an
       unclamped all-zero value. */
    for (ix = 0; (ix < MP_USED(mp)) && !(d = MP_DIGIT(mp, ix)); ++ix)
        n += MP_DIGIT_BIT;
    if (!d)
        return 0;
    /* shouldn't happen, but ... */

    /* Binary search for the lowest set bit within digit d. */
#if !defined(MP_USE_UINT_DIGIT)
    if (!(d & 0xffffffffU)) {
        d >>= 32;
        n += 32;
    }
#endif
    if (!(d & 0xffffU)) {
        d >>= 16;
        n += 16;
    }
    if (!(d & 0xffU)) {
        d >>= 8;
        n += 8;
    }
    if (!(d & 0xfU)) {
        d >>= 4;
        n += 4;
    }
    if (!(d & 0x3U)) {
        d >>= 2;
        n += 2;
    }
    if (!(d & 0x1U)) {
        d >>= 1;
        n += 1;
    }
#if MP_ARGCHK == 2
    assert(0 != (d & 1));
#endif
    return n;
}
/* Given a and prime p, computes c and k such that a*c == 2**k (mod p).
** Returns k (positive) or error (negative).
** This technique from the paper "Fast Modular Reciprocals" (unpublished)
** by Richard Schroeppel (a.k.a. Captain Nemo).
*/
mp_err
s_mp_almost_inverse(
const mp_int *a,
const mp_int *p, mp_int *c)
{
mp_err res;
mp_err k = 0;
mp_int d, f, g;
ARGCHK(a != NULL && p != NULL && c != NULL, MP_BADARG);
/* Zero the digit pointers so CLEANUP is safe even if an init fails. */
MP_DIGITS(&d) = 0;
MP_DIGITS(&f) = 0;
MP_DIGITS(&g) = 0;
MP_CHECKOK(mp_init(&d));
MP_CHECKOK(mp_init_copy(&f, a));
/* f = a */
MP_CHECKOK(mp_init_copy(&g, p));
/* g = p */
mp_set(c, 1);
mp_zero(&d);
if (mp_cmp_z(&f) == 0) {
res = MP_UNDEF;
}
else
for (;;) {
int diff_sign;
/* Strip factors of 2 from f, doubling d the same number of
   times and accumulating the shift count in k. */
while (mp_iseven(&f)) {
mp_size n = mp_trailing_zeros(&f);
if (!n) {
res = MP_UNDEF;
goto CLEANUP;
}
s_mp_div_2d(&f, n);
MP_CHECKOK(s_mp_mul_2d(&d, n));
k += n;
}
if (mp_cmp_d(&f, 1) == MP_EQ) {
/* f == 1 */
res = k;
break;
}
diff_sign = mp_cmp(&f, &g);
if (diff_sign < 0) {
/* f < g */
s_mp_exch(&f, &g);
s_mp_exch(c, &d);
}
else if (diff_sign == 0) {
/* f == g */
res = MP_UNDEF;
/* a and p are not relatively prime */
break;
}
/* Choose add or subtract so the new f is divisible by 4,
   based on the low two bits of f and g. */
if ((MP_DIGIT(&f, 0) % 4) == (MP_DIGIT(&g, 0) % 4)) {
MP_CHECKOK(mp_sub(&f, &g, &f));
/* f = f - g */
MP_CHECKOK(mp_sub(c, &d, c));
/* c = c - d */
}
else {
MP_CHECKOK(mp_add(&f, &g, &f));
/* f = f + g */
MP_CHECKOK(mp_add(c, &d, c));
/* c = c + d */
}
}
/* Success: normalize c into the range [0, p) before returning k. */
if (res >= 0) {
if (mp_cmp_mag(c, p) >= 0) {
MP_CHECKOK(mp_div(c, p, NULL, c));
}
if (MP_SIGN(c) != MP_ZPOS) {
MP_CHECKOK(mp_add(c, p, c));
}
res = k;
}
CLEANUP:
mp_clear(&d);
mp_clear(&f);
mp_clear(&g);
return res;
}
/* Compute T = (P ** -1) mod MP_RADIX. Also works for 16-bit mp_digits.
** This technique from the paper "Fast Modular Reciprocals" (unpublished)
** by Richard Schroeppel (a.k.a. Captain Nemo).
*/
mp_digit
s_mp_invmod_radix(mp_digit P)
{
    /* Newton iteration: starting from T = P (correct modulo 8 for odd
       P), each pass T *= 2 - P*T doubles the number of correct
       low-order bits.  Four passes suffice for a 32-bit digit, six for
       a larger one. */
#if !defined(MP_USE_UINT_DIGIT)
    const int rounds = 6;
#else
    const int rounds = 4;
#endif
    mp_digit T = P;
    int i;

    for (i = 0; i < rounds; i++)
        T *= 2 - (P * T);

    return T;
}
/* Given c, k, and prime p, where a*c == 2**k (mod p),
** Compute x = (a ** -1) mod p. This is similar to Montgomery reduction.
** This technique from the paper "Fast Modular Reciprocals" (unpublished)
** by Richard Schroeppel (a.k.a. Captain Nemo).
*/
mp_err
s_mp_fixup_reciprocal(
const mp_int *c,
const mp_int *p,
int k, mp_int *x)
{
int k_orig = k;
mp_digit r;
mp_size ix;
mp_err res;
if (mp_cmp_z(c) < 0) {
/* c < 0 */
MP_CHECKOK(mp_add(c, p, x));
/* x = c + p */
}
else {
MP_CHECKOK(mp_copy(c, x));
/* x = c */
}
/* make sure x is large enough */
ix = MP_HOWMANY(k, MP_DIGIT_BIT) + MP_USED(p) + 1;
ix = MP_MAX(ix, MP_USED(x));
MP_CHECKOK(s_mp_pad(x, ix));
/* r = -(p**-1) mod RADIX, the Montgomery-style correction factor. */
r = 0 - s_mp_invmod_radix(MP_DIGIT(p, 0));
/* Clear the low k bits of x one digit at a time by adding the
   multiple of p that zeroes each digit, then shift them away. */
for (ix = 0; k > 0; ix++) {
int j = MP_MIN(k, MP_DIGIT_BIT);
mp_digit v = r * MP_DIGIT(x, ix);
if (j < MP_DIGIT_BIT) {
v &= ((mp_digit)1 << j) - 1;
/* v = v mod (2 ** j) */
}
s_mp_mul_d_add_offset(p, v, x, ix);
/* x += p * v * (RADIX ** ix) */
k -= j;
}
s_mp_clamp(x);
/* The low k_orig bits are now zero; this shift is exact. */
s_mp_div_2d(x, k_orig);
res = MP_OKAY;
CLEANUP:
return res;
}
/*
Computes the modular inverse using the constant-time algorithm
by Bernstein and Yang (https://eprint.iacr.org/2019/266)
"Fast constant-time gcd computation and modular inversion"
*/
mp_err
s_mp_invmod_odd_m(
const mp_int *a,
const mp_int *m, mp_int *c)
{
mp_err res;
mp_digit cond = 0;
mp_int g, f, v, r, temp;
int i, its, delta = 1, last = -1;
mp_size top, flen, glen;
mp_int *clear[6];
ARGCHK(a != NULL && m != NULL && c != NULL, MP_BADARG);
/* Check for invalid inputs. */
if (mp_cmp_z(a) == MP_EQ || mp_cmp_d(m, 2) == MP_LT)
return MP_RANGE;
if (a == m || mp_iseven(m))
return MP_UNDEF;
/* Register each initialized temporary so CLEANUP frees only those. */
MP_CHECKOK(mp_init(&temp));
clear[++last] = &temp;
MP_CHECKOK(mp_init(&v));
clear[++last] = &v;
MP_CHECKOK(mp_init(&r));
clear[++last] = &r;
MP_CHECKOK(mp_init_copy(&g, a));
clear[++last] = &g;
MP_CHECKOK(mp_init_copy(&f, m));
clear[++last] = &f;
mp_set(&v, 0);
mp_set(&r, 1);
/* Allocate to the size of largest mp_int. */
top = (mp_size)1 + ((USED(&f) >= USED(&g)) ? USED(&f) : USED(&g));
MP_CHECKOK(s_mp_grow(&f, top));
MP_CHECKOK(s_mp_grow(&g, top));
MP_CHECKOK(s_mp_grow(&temp, top));
MP_CHECKOK(s_mp_grow(&v, top));
MP_CHECKOK(s_mp_grow(&r, top));
/* Upper bound for the total iterations. */
flen = mpl_significant_bits(&f);
glen = mpl_significant_bits(&g);
its = 4 + 3 * ((flen >= glen) ? flen : glen);
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4146)
// Thanks MSVC, we know what we're negating an unsigned mp_digit
#endif
/* Fixed iteration count; all selections happen via mp_cswap masks so
   the operation sequence is independent of the secret values. */
for (i = 0; i < its; i++) {
/* Step 1: conditional swap. */
/* Set cond if delta > 0 and g is odd. */
cond = (-delta >> (8 *
sizeof(delta) - 1)) & DIGIT(&g, 0) & 1;
/* If cond is set replace (delta,f,v) with (-delta,-f,-v). */
delta = (-cond & -delta) | ((cond - 1) & delta);
SIGN(&f) ^= cond;
SIGN(&v) ^= cond;
/* If cond is set swap (f,v) with (g,r). */
MP_CHECKOK(mp_cswap(cond, &f, &g, top));
MP_CHECKOK(mp_cswap(cond, &v, &r, top));
/* Step 2: elimination. */
/* Update delta */
delta++;
/* If g is odd replace r with (r+v). */
MP_CHECKOK(mp_add(&r, &v, &temp));
MP_CHECKOK(mp_cswap((DIGIT(&g, 0) & 1), &r, &temp, top));
/* If g is odd, right shift (g+f) else right shift g. */
MP_CHECKOK(mp_add(&g, &f, &temp));
MP_CHECKOK(mp_cswap((DIGIT(&g, 0) & 1), &g, &temp, top));
s_mp_div_2(&g);
/*
If r is even, right shift it.
If r is odd, right shift (r+m) which is even because m is odd.
We want the result modulo m so adding in multiples of m here vanish.
*/
MP_CHECKOK(mp_add(&r, m, &temp));
MP_CHECKOK(mp_cswap((DIGIT(&r, 0) & 1), &r, &temp, top));
s_mp_div_2(&r);
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
/* We have the inverse in v, propagate sign from f. */
SIGN(&v) ^= SIGN(&f);
/* GCD is in f, take the absolute value. */
SIGN(&f) = ZPOS;
/* If gcd != 1, not invertible. */
if (mp_cmp_d(&f, 1) != MP_EQ) {
res = MP_UNDEF;
goto CLEANUP;
}
/* Return inverse modulo m. */
MP_CHECKOK(mp_mod(&v, m, c));
CLEANUP:
while (last >= 0)
mp_clear(clear[last--]);
return res;
}
/* Known good algorithm for computing modular inverse. But slow. */
mp_err
mp_invmod_xgcd(
const mp_int *a,
const mp_int *m, mp_int *c)
{
mp_int g, x;
mp_err res;
ARGCHK(a != NULL && m != NULL && c != NULL, MP_BADARG);
if (mp_cmp_z(a) == 0 || mp_cmp_z(m) == 0)
return MP_RANGE;
/* Zero digit pointers so CLEANUP is safe if the first init fails. */
MP_DIGITS(&g) = 0;
MP_DIGITS(&x) = 0;
MP_CHECKOK(mp_init(&x));
MP_CHECKOK(mp_init(&g));
/* Extended GCD gives g = gcd(a, m) and x with a*x == g (mod m). */
MP_CHECKOK(mp_xgcd(a, m, &g, &x, NULL));
/* An inverse exists only when gcd(a, m) == 1. */
if (mp_cmp_d(&g, 1) != MP_EQ) {
res = MP_UNDEF;
goto CLEANUP;
}
res = mp_mod(&x, m, c);
/* NOTE(review): this forces the result's sign to match a's sign even
   after the mp_mod above; presumably intentional for callers that
   expect a signed inverse -- confirm before changing. */
SIGN(c) = SIGN(a);
CLEANUP:
mp_clear(&x);
mp_clear(&g);
return res;
}
/* modular inverse where modulus is 2**k. */
/* c = a**-1 mod 2**k */
mp_err
s_mp_invmod_2d(
const mp_int *a, mp_size k, mp_int *c)
{
mp_err res;
mp_size ix = k + 4;
mp_int t0, t1, val, tmp, two2k;
static const mp_digit d2 = 2;
static const mp_int two = { MP_ZPOS, 1, 1, (mp_digit *)&d2 };
/* Only odd values are invertible modulo a power of 2. */
if (mp_iseven(a))
return MP_UNDEF;
#if defined(_MSC_VER)
#pragma warning(push)
#pragma warning(disable : 4146)
// Thanks MSVC, we know what we're negating an unsigned mp_digit
#endif
/* Small case: the whole inverse fits in a single digit. */
if (k <= MP_DIGIT_BIT) {
mp_digit i = s_mp_invmod_radix(MP_DIGIT(a, 0));
/* propagate the sign from mp_int */
i = (i ^ -(mp_digit)SIGN(a)) + (mp_digit)SIGN(a);
if (k < MP_DIGIT_BIT)
i &= ((mp_digit)1 << k) - (mp_digit)1;
mp_set(c, i);
return MP_OKAY;
}
#if defined(_MSC_VER)
#pragma warning(pop)
#endif
/* Zero digit pointers so CLEANUP is safe if an init fails. */
MP_DIGITS(&t0) = 0;
MP_DIGITS(&t1) = 0;
MP_DIGITS(&val) = 0;
MP_DIGITS(&tmp) = 0;
MP_DIGITS(&two2k) = 0;
MP_CHECKOK(mp_init_copy(&val, a));
s_mp_mod_2d(&val, k);
MP_CHECKOK(mp_init_copy(&t0, &val));
MP_CHECKOK(mp_init_copy(&t1, &t0));
MP_CHECKOK(mp_init(&tmp));
MP_CHECKOK(mp_init(&two2k));
MP_CHECKOK(s_mp_2expt(&two2k, k));
/* Newton iteration t1 <- t1 * (2 - val * t1) mod 2**k, repeated
   until it reaches a fixed point (or the ix bound runs out). */
do {
MP_CHECKOK(mp_mul(&val, &t1, &tmp));
MP_CHECKOK(mp_sub(&two, &tmp, &tmp));
MP_CHECKOK(mp_mul(&t1, &tmp, &t1));
s_mp_mod_2d(&t1, k);
/* Bring t1 back into the non-negative range [0, 2**k). */
while (MP_SIGN(&t1) != MP_ZPOS) {
MP_CHECKOK(mp_add(&t1, &two2k, &t1));
}
if (mp_cmp(&t1, &t0) == MP_EQ)
break;
MP_CHECKOK(mp_copy(&t1, &t0));
}
while (--ix > 0);
if (!ix) {
res = MP_UNDEF;
}
else {
mp_exch(c, &t1);
}
CLEANUP:
mp_clear(&t0);
mp_clear(&t1);
mp_clear(&val);
mp_clear(&tmp);
mp_clear(&two2k);
return res;
}
/* Compute c = a**-1 mod m for even m, by splitting m into an odd
   factor and a power of 2, inverting a modulo each, and recombining
   the two partial inverses with the CRT. */
mp_err
s_mp_invmod_even_m(
const mp_int *a,
const mp_int *m, mp_int *c)
{
mp_err res;
mp_size k;
mp_int oddFactor, evenFactor;
/* factors of the modulus */
mp_int oddPart, evenPart;
/* parts to combine via CRT. */
mp_int C2, tmp1, tmp2;
ARGCHK(a != NULL && m != NULL && c != NULL, MP_BADARG);
/*static const mp_digit d1 = 1; */
/*static const mp_int one = { MP_ZPOS, 1, 1, (mp_digit *)&d1 }; */
/* If m is exactly a power of 2, the specialized routine suffices. */
if ((res = s_mp_ispow2(m)) >= 0) {
k = res;
return s_mp_invmod_2d(a, k, c);
}
/* Zero digit pointers so CLEANUP is safe if an init fails. */
MP_DIGITS(&oddFactor) = 0;
MP_DIGITS(&evenFactor) = 0;
MP_DIGITS(&oddPart) = 0;
MP_DIGITS(&evenPart) = 0;
MP_DIGITS(&C2) = 0;
MP_DIGITS(&tmp1) = 0;
MP_DIGITS(&tmp2) = 0;
MP_CHECKOK(mp_init_copy(&oddFactor, m));
/* oddFactor = m */
MP_CHECKOK(mp_init(&evenFactor));
MP_CHECKOK(mp_init(&oddPart));
MP_CHECKOK(mp_init(&evenPart));
MP_CHECKOK(mp_init(&C2));
MP_CHECKOK(mp_init(&tmp1));
MP_CHECKOK(mp_init(&tmp2));
/* Split m = oddFactor * 2**k. */
k = mp_trailing_zeros(m);
s_mp_div_2d(&oddFactor, k);
MP_CHECKOK(s_mp_2expt(&evenFactor, k));
/* compute a**-1 mod oddFactor. */
MP_CHECKOK(s_mp_invmod_odd_m(a, &oddFactor, &oddPart));
/* compute a**-1 mod evenFactor, where evenFactor == 2**k. */
MP_CHECKOK(s_mp_invmod_2d(a, k, &evenPart));
/* Use Chinese Remainder theorem to compute a**-1 mod m. */
/* let m1 = oddFactor, v1 = oddPart,
* let m2 = evenFactor, v2 = evenPart.
*/
/* Compute C2 = m1**-1 mod m2. */
MP_CHECKOK(s_mp_invmod_2d(&oddFactor, k, &C2));
/* compute u = (v2 - v1)*C2 mod m2 */
MP_CHECKOK(mp_sub(&evenPart, &oddPart, &tmp1));
MP_CHECKOK(mp_mul(&tmp1, &C2, &tmp2));
s_mp_mod_2d(&tmp2, k);
/* Normalize the residue into [0, 2**k). */
while (MP_SIGN(&tmp2) != MP_ZPOS) {
MP_CHECKOK(mp_add(&tmp2, &evenFactor, &tmp2));
}
/* compute answer = v1 + u*m1 */
MP_CHECKOK(mp_mul(&tmp2, &oddFactor, c));
MP_CHECKOK(mp_add(&oddPart, c, c));
/* not sure this is necessary, but it's low cost if not. */
MP_CHECKOK(mp_mod(c, m, c));
CLEANUP:
mp_clear(&oddFactor);
mp_clear(&evenFactor);
mp_clear(&oddPart);
mp_clear(&evenPart);
mp_clear(&C2);
mp_clear(&tmp1);
mp_clear(&tmp2);
return res;
}
/* {{{ mp_invmod(a, m, c) */
/*
mp_invmod(a, m, c)
Compute c = a^-1 (mod m), if there is an inverse for a (mod m).
This is equivalent to the question of whether (a, m) = 1. If not,
MP_UNDEF is returned, and there is no inverse.
*/
mp_err
mp_invmod(const mp_int *a, const mp_int *m, mp_int *c)
{
    ARGCHK(a != NULL && m != NULL && c != NULL, MP_BADARG);

    if (mp_cmp_z(a) == 0 || mp_cmp_z(m) == 0)
        return MP_RANGE;

    /* Odd modulus: the constant-time routine handles it directly. */
    if (mp_isodd(m))
        return s_mp_invmod_odd_m(a, m, c);

    /* Even modulus and even a share the factor 2 -- not invertible. */
    if (mp_iseven(a))
        return MP_UNDEF;

    return s_mp_invmod_even_m(a, m, c);
}
/* end mp_invmod() */
/* }}} */
/* }}} */
/*------------------------------------------------------------------------*/
/* {{{ mp_print(mp, ofp) */
#if MP_IOFUNC
/*
mp_print(mp, ofp)
Print a textual representation of the given mp_int on the output
stream 'ofp'. Output is generated using the internal radix.
*/
void
mp_print(mp_int *mp, FILE *ofp)
{
    int ix;

    if (mp == NULL || ofp == NULL)
        return;

    /* Explicit sign, then the digits from most significant down. */
    fputc((SIGN(mp) == NEG) ? '-' : '+', ofp);

    for (ix = USED(mp) - 1; ix >= 0; ix--)
        fprintf(ofp, DIGIT_FMT, DIGIT(mp, ix));
}
/* end mp_print() */
#endif /* if MP_IOFUNC */
/* }}} */
/*------------------------------------------------------------------------*/
/* {{{ More I/O Functions */
/* {{{ mp_read_raw(mp, str, len) */
/*
mp_read_raw(mp, str, len)
Read in a raw value (base 256) into the given mp_int
*/
mp_err
mp_read_raw(mp_int *mp, char *str, int len)
{
    int ix;
    mp_err res;
    unsigned char *ustr = (unsigned char *)str;

    ARGCHK(mp != NULL && str != NULL && len > 0, MP_BADARG);

    mp_zero(mp);

    /* Bytes 1..len-1 hold the magnitude, most significant first;
       accumulate them Horner-style in base 256. */
    for (ix = 1; ix < len; ix++) {
        if ((res = mp_mul_d(mp, 256, mp)) != MP_OKAY)
            return res;
        if ((res = mp_add_d(mp, ustr[ix], mp)) != MP_OKAY)
            return res;
    }

    /* Byte 0 is the sign flag: nonzero means negative. */
    SIGN(mp) = ustr[0] ? NEG : ZPOS;

    return MP_OKAY;
}
/* end mp_read_raw() */
/* }}} */
/* {{{ mp_raw_size(mp) */
/* Number of bytes mp_toraw() needs for mp: one sign byte plus the
   full digit array. */
int
mp_raw_size(mp_int *mp)
{
    ARGCHK(mp != NULL, 0);

    return (int)(USED(mp) * sizeof(mp_digit)) + 1;
}
/* end mp_raw_size() */
/* }}} */
/* {{{ mp_toraw(mp, str) */
/*
  mp_toraw(mp, str)

  Serialize mp into str: the sign byte first, then each digit unpacked
  high-order byte first, starting from the most significant digit.
  str must hold at least mp_raw_size(mp) bytes.
*/
mp_err
mp_toraw(mp_int *mp, char *str)
{
    int ix, jx, pos = 1;

    ARGCHK(mp != NULL && str != NULL, MP_BADARG);

    str[0] = (char)SIGN(mp);

    for (ix = USED(mp) - 1; ix >= 0; ix--) {
        mp_digit d = DIGIT(mp, ix);

        /* Emit this digit's bytes from most to least significant. */
        for (jx = sizeof(mp_digit) - 1; jx >= 0; jx--)
            str[pos++] = (char)(d >> (jx * CHAR_BIT));
    }

    return MP_OKAY;
}
/* end mp_toraw() */
/* }}} */
/* {{{ mp_read_radix(mp, str, radix) */
/*
  mp_read_radix(mp, str, radix)

  Read an integer from the given string in the specified radix
  (2..MAX_RADIX), and set mp to the resulting value.  Leading
  non-digit characters are ignored, and the function reads until a
  character that is not a digit in the radix, or the end of the
  string.
*/
mp_err
mp_read_radix(mp_int *mp, const char *str, int radix)
{
    int ix = 0, val = 0;
    mp_err res;
    mp_sign sig = ZPOS;

    ARGCHK(mp != NULL && str != NULL && radix >= 2 && radix <= MAX_RADIX,
           MP_BADARG);

    mp_zero(mp);

    /* Skip everything that is neither a digit nor a sign mark. */
    while (str[ix] &&
           (s_mp_tovalue(str[ix], radix) < 0) &&
           str[ix] != '-' &&
           str[ix] != '+') {
        ++ix;
    }

    if (str[ix] == '-') {
        sig = NEG;
        ++ix;
    } else if (str[ix] == '+') {
        sig = ZPOS; /* this is the default anyway... */
        ++ix;
    }

    /* Horner accumulation: mp = mp * radix + digit value. */
    while ((val = s_mp_tovalue(str[ix], radix)) >= 0) {
        if ((res = s_mp_mul_d(mp, radix)) != MP_OKAY)
            return res;
        if ((res = s_mp_add_d(mp, val)) != MP_OKAY)
            return res;
        ++ix;
    }

    /* Zero is always stored with a positive sign. */
    SIGN(mp) = (s_mp_cmp_d(mp, 0) == MP_EQ) ? ZPOS : sig;

    return MP_OKAY;
}
/* end mp_read_radix() */
/*
  mp_read_variable_radix(a, str, default_radix)

  Like mp_read_radix(), but infers the radix from a C-style prefix:
  "0x"/"0X" selects 16, a bare leading "0" selects 8; otherwise
  default_radix is used.  An optional sign may precede the prefix.
*/
mp_err
mp_read_variable_radix(mp_int *a, const char *str, int default_radix)
{
    int radix = default_radix;
    int cx;
    mp_sign sig = ZPOS;
    mp_err res;

    /* Skip leading characters that are neither digits nor a sign. */
    while ((cx = *str) != 0 &&
           (s_mp_tovalue(cx, radix) < 0) &&
           cx != '-' &&
           cx != '+') {
        ++str;
    }

    if (cx == '-') {
        sig = NEG;
        ++str;
    } else if (cx == '+') {
        sig = ZPOS; /* this is the default anyway... */
        ++str;
    }

    /* Detect an explicit radix prefix. */
    if (str[0] == '0') {
        if ((str[1] | 0x20) == 'x') {
            radix = 16;
            str += 2;
        } else {
            radix = 8;
            str++;
        }
    }

    res = mp_read_radix(a, str, radix);
    if (res == MP_OKAY) {
        /* Apply the saved sign; zero stays positive. */
        MP_SIGN(a) = (s_mp_cmp_d(a, 0) == MP_EQ) ? ZPOS : sig;
    }
    return res;
}
/* }}} */
/* {{{ mp_radix_size(mp, radix) */
/* Upper bound on the buffer size mp_toradix() needs for mp in the
   given radix (including sign and terminator); 0 on bad arguments. */
int
mp_radix_size(mp_int *mp, int radix)
{
    int bits;

    if (!mp || radix < 2 || radix > MAX_RADIX)
        return 0;

    bits = USED(mp) * DIGIT_BIT - 1;

    /* SIGN(mp) adds room for a '-' on negative values -- presumably
       NEG is the nonzero sign code; verify against mpi.h. */
    return SIGN(mp) + s_mp_outlen(bits, radix);
}
/* end mp_radix_size() */
/* }}} */
/* {{{ mp_toradix(mp, str, radix) */
mp_err
mp_toradix(mp_int *mp,
char *str,
int radix)
{
int ix, pos = 0;
ARGCHK(mp != NULL && str != NULL, MP_BADARG);
ARGCHK(radix > 1 && radix <= MAX_RADIX, MP_RANGE);
if (mp_cmp_z(mp) == MP_EQ) {
str[0] =
'0';
str[1] =
'\0';
}
else {
mp_err res;
mp_int tmp;
mp_sign sgn;
mp_digit rem, rdx = (mp_digit)radix;
char ch;
if ((res = mp_init_copy(&tmp, mp)) != MP_OKAY)
return res;
/* Save sign for later, and take absolute value */
sgn = SIGN(&tmp);
SIGN(&tmp) = ZPOS;
/* Generate output digits in reverse order */
while (mp_cmp_z(&tmp) != 0) {
if ((res = mp_div_d(&tmp, rdx, &tmp, &rem)) != MP_OKAY) {
mp_clear(&tmp);
return res;
}
/* Generate digits, use capital letters */
ch = s_mp_todigit(rem, radix, 0);
--> --------------------
--> maximum size reached
--> --------------------