diff --git a/.gitignore b/.gitignore index 1dc7a8fc1..b92cd9b14 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,7 @@ Cargo.lock #IntelliJ project files .idea *.iml + +CMakeLists.txt +cmake-build-debug +.vscode/ diff --git a/Cargo.toml b/Cargo.toml index 5f2bc5bc3..e44746e1d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "secp256k1" -version = "0.13.0" +version = "0.13.1" authors = [ "Dawid Ciężarkiewicz <dpc@ucore.info>", "Andrew Poelstra <apoelstra@wpsoftware.net>" ] license = "CC0-1.0" diff --git a/build.rs b/build.rs index 664af177f..92a4d278e 100644 --- a/build.rs +++ b/build.rs @@ -49,10 +49,15 @@ fn main() { .flag_if_supported("-Wno-unused-function") // some ecmult stuff is defined but not used upstream .define("SECP256K1_BUILD", Some("1")) // TODO these three should be changed to use libgmp, at least until secp PR 290 is merged - .define("USE_NUM_NONE", Some("1")) + // .define("USE_NUM_NONE", Some("1")) + .define("USE_NUM_GMP", Some("1")) + .define("GMP_NUMB_BITS", Some("32")) + // .define("USE_NUM_NONE", Some("1")) .define("USE_FIELD_INV_BUILTIN", Some("1")) .define("USE_SCALAR_INV_BUILTIN", Some("1")) - .define("ENABLE_MODULE_ECDH", Some("1")); + .define("ENABLE_MODULE_ECDH", Some("1")) + .define("USE_EXTERNAL_DEFAULT_CALLBACKS", Some("1")) + .define("ECMULT_WINDOW_SIZE", Some("4")); // NB: the configure default (`auto`) is 15, not 4; see the note below #[cfg(feature = "endomorphism")] base_config.define("USE_ENDOMORPHISM", Some("1")); diff --git a/depend/secp256k1/.travis.yml b/depend/secp256k1/.travis.yml index c4154e9a8..74f658f4d 100644 --- a/depend/secp256k1/.travis.yml +++ b/depend/secp256k1/.travis.yml @@ -1,5 +1,5 @@ language: c -sudo: false +os: linux addons: apt: packages: libgmp-dev @@ -66,4 +66,3 @@ script: - if [ -n "$HOST" ]; then export USE_HOST="--host=$HOST"; fi - if [ "x$HOST" = "xi686-linux-gnu" ]; then export CC="$CC -m32"; fi - ./configure --enable-experimental=$EXPERIMENTAL --enable-endomorphism=$ENDOMORPHISM --with-field=$FIELD --with-bignum=$BIGNUM --with-scalar=$SCALAR --enable-ecmult-static-precomputation=$STATICPRECOMPUTATION --enable-module-ecdh=$ECDH --enable-module-recovery=$RECOVERY --enable-jni=$JNI $EXTRAFLAGS $USE_HOST && make -j2 $BUILD -os: linux diff --git a/depend/secp256k1/Makefile.am b/depend/secp256k1/Makefile.am index 01fd0cd6d..21df09f41 100644 --- a/depend/secp256k1/Makefile.am +++ b/depend/secp256k1/Makefile.am @@ -8,6 +8,7 @@ else JNI_LIB = endif include_HEADERS = include/secp256k1.h +include_HEADERS += include/secp256k1_preallocated.h noinst_HEADERS = noinst_HEADERS += src/scalar.h noinst_HEADERS += src/scalar_4x64.h @@ -114,7 +115,7 @@ exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src $(SECP_INCLUDE if !ENABLE_COVERAGE exhaustive_tests_CPPFLAGS += -DVERIFY endif -exhaustive_tests_LDADD = $(SECP_LIBS) +exhaustive_tests_LDADD = $(SECP_LIBS) $(COMMON_LIB) exhaustive_tests_LDFLAGS = -static TESTS += exhaustive_tests endif @@ -151,7 +152,6 @@ endif if USE_ECMULT_STATIC_PRECOMPUTATION CPPFLAGS_FOR_BUILD +=-I$(top_srcdir) -CFLAGS_FOR_BUILD += -Wall -Wextra -Wno-unused-function gen_context_OBJECTS = gen_context.o gen_context_BIN = gen_context$(BUILD_EXEEXT) @@ -159,7 +159,7 @@ gen_%.o: src/gen_%.c $(CC_FOR_BUILD) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@ $(gen_context_BIN): $(gen_context_OBJECTS) - $(CC_FOR_BUILD) $^ -o $@ + $(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@ $(libsecp256k1_la_OBJECTS): src/ecmult_static_context.h $(tests_OBJECTS): src/ecmult_static_context.h
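[Editor's note on the build.rs hunk above: USE_EXTERNAL_DEFAULT_CALLBACKS tells libsecp256k1 not to compile its built-in illegal-argument and error handlers, so the final link must supply the two callback symbols itself (signatures as in upstream libsecp256k1); this fork is presumably expected to export them from the Rust side, which is not shown in this diff. A minimal C sketch of what the linking application would otherwise have to define:

    #include <stdio.h>
    #include <stdlib.h>

    /* With USE_EXTERNAL_DEFAULT_CALLBACKS the library no longer defines
       these two symbols, so the application must provide them. */
    void secp256k1_default_illegal_callback_fn(const char *str, void *data) {
        (void)data; /* unused user pointer */
        fprintf(stderr, "secp256k1: illegal argument: %s\n", str);
        abort(); /* upstream's own defaults abort(); returning here is not safe */
    }

    void secp256k1_default_error_callback_fn(const char *str, void *data) {
        (void)data;
        fprintf(stderr, "secp256k1: internal error: %s\n", str);
        abort();
    }

ECMULT_WINDOW_SIZE=4 trades verification speed for memory: by the formula in the configure.ac help text below, the precomputed table stores 2^(SIZE-2) * 64 bytes, i.e. 2^2 * 64 = 256 bytes at 4, versus 2^13 * 64 = 512 KiB at the `auto` default of 15.]

diff --git a/depend/secp256k1/configure.ac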
b/depend/secp256k1/configure.ac index 68c45a56f..b8340b7de 100644 --- a/depend/secp256k1/configure.ac +++ b/depend/secp256k1/configure.ac @@ -85,42 +85,42 @@ AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], ]) AC_ARG_ENABLE(benchmark, - AS_HELP_STRING([--enable-benchmark],[compile benchmark (default is yes)]), + AS_HELP_STRING([--enable-benchmark],[compile benchmark [default=yes]]), [use_benchmark=$enableval], [use_benchmark=yes]) AC_ARG_ENABLE(coverage, - AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis]), + AS_HELP_STRING([--enable-coverage],[enable compiler flags to support kcov coverage analysis [default=no]]), [enable_coverage=$enableval], [enable_coverage=no]) AC_ARG_ENABLE(tests, - AS_HELP_STRING([--enable-tests],[compile tests (default is yes)]), + AS_HELP_STRING([--enable-tests],[compile tests [default=yes]]), [use_tests=$enableval], [use_tests=yes]) AC_ARG_ENABLE(openssl_tests, - AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests, if OpenSSL is available (default is auto)]), + AS_HELP_STRING([--enable-openssl-tests],[enable OpenSSL tests [default=auto]]), [enable_openssl_tests=$enableval], [enable_openssl_tests=auto]) AC_ARG_ENABLE(experimental, - AS_HELP_STRING([--enable-experimental],[allow experimental configure options (default is no)]), + AS_HELP_STRING([--enable-experimental],[allow experimental configure options [default=no]]), [use_experimental=$enableval], [use_experimental=no]) AC_ARG_ENABLE(exhaustive_tests, - AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests (default is yes)]), + AS_HELP_STRING([--enable-exhaustive-tests],[compile exhaustive tests [default=yes]]), [use_exhaustive_tests=$enableval], [use_exhaustive_tests=yes]) AC_ARG_ENABLE(endomorphism, - AS_HELP_STRING([--enable-endomorphism],[enable endomorphism (default is no)]), + AS_HELP_STRING([--enable-endomorphism],[enable endomorphism [default=no]]), [use_endomorphism=$enableval], [use_endomorphism=no]) AC_ARG_ENABLE(ecmult_static_precomputation, - AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing (default is yes)]), + AS_HELP_STRING([--enable-ecmult-static-precomputation],[enable precomputed ecmult table for signing [default=auto]]), [use_ecmult_static_precomputation=$enableval], [use_ecmult_static_precomputation=auto]) @@ -130,65 +130,100 @@ AC_ARG_ENABLE(module_ecdh, [enable_module_ecdh=no]) AC_ARG_ENABLE(module_recovery, - AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module (default is no)]), + AS_HELP_STRING([--enable-module-recovery],[enable ECDSA pubkey recovery module [default=no]]), [enable_module_recovery=$enableval], [enable_module_recovery=no]) +AC_ARG_ENABLE(external_default_callbacks, + AS_HELP_STRING([--enable-external-default-callbacks],[enable external default callback functions (default is no)]), + [use_external_default_callbacks=$enableval], + [use_external_default_callbacks=no]) + AC_ARG_ENABLE(jni, - AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni (default is no)]), + AS_HELP_STRING([--enable-jni],[enable libsecp256k1_jni [default=no]]), [use_jni=$enableval], [use_jni=no]) AC_ARG_WITH([field], [AS_HELP_STRING([--with-field=64bit|32bit|auto], -[Specify Field Implementation. Default is auto])],[req_field=$withval], [req_field=auto]) +[finite field implementation to use [default=auto]])],[req_field=$withval], [req_field=auto]) AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto], -[Specify Bignum Implementation. 
Default is auto])],[req_bignum=$withval], [req_bignum=auto]) +[bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto]) AC_ARG_WITH([scalar], [AS_HELP_STRING([--with-scalar=64bit|32bit|auto], -[Specify scalar implementation. Default is auto])],[req_scalar=$withval], [req_scalar=auto]) +[scalar implementation to use [default=auto]])],[req_scalar=$withval], [req_scalar=auto]) -AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto] -[Specify assembly optimizations to use. Default is auto (experimental: arm)])],[req_asm=$withval], [req_asm=auto]) +AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto], +[assembly optimizations to use (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto]) -AC_CHECK_TYPES([__int128]) +AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto], +[window size for ecmult precomputation for verification, specified as integer in range [2..24].] +[Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.] +[The table will store 2^(SIZE-2) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.] +[If the endomorphism optimization is enabled, two tables of this size are used instead of only one.] +["auto" is a reasonable setting for desktop machines (currently 15). [default=auto]] +)], +[req_ecmult_window=$withval], [req_ecmult_window=auto]) -AC_MSG_CHECKING([for __builtin_expect]) -AC_COMPILE_IFELSE([AC_LANG_SOURCE([[void myfunc() {__builtin_expect(0,0);}]])], - [ AC_MSG_RESULT([yes]);AC_DEFINE(HAVE_BUILTIN_EXPECT,1,[Define this symbol if __builtin_expect is available]) ], - [ AC_MSG_RESULT([no]) - ]) +AC_CHECK_TYPES([__int128]) if test x"$enable_coverage" = x"yes"; then AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code]) CFLAGS="$CFLAGS -O0 --coverage" - LDFLAGS="--coverage" + LDFLAGS="$LDFLAGS --coverage" else CFLAGS="$CFLAGS -O3" fi if test x"$use_ecmult_static_precomputation" != x"no"; then + # Temporarily switch to an environment for the native compiler save_cross_compiling=$cross_compiling cross_compiling=no - TEMP_CC="$CC" + SAVE_CC="$CC" CC="$CC_FOR_BUILD" - AC_MSG_CHECKING([native compiler: ${CC_FOR_BUILD}]) + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS_FOR_BUILD" + SAVE_CPPFLAGS="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS_FOR_BUILD" + SAVE_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS_FOR_BUILD" + + warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function" + saved_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $warn_CFLAGS_FOR_BUILD" + AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}]) + AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], + [ AC_MSG_RESULT([yes]) ], + [ AC_MSG_RESULT([no]) + CFLAGS="$saved_CFLAGS" + ]) + + AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}]) AC_RUN_IFELSE( - [AC_LANG_PROGRAM([], [return 0])], + [AC_LANG_PROGRAM([], [])], [working_native_cc=yes], [working_native_cc=no],[dnl]) - CC="$TEMP_CC" + + CFLAGS_FOR_BUILD="$CFLAGS" + + # Restore the environment cross_compiling=$save_cross_compiling + CC="$SAVE_CC" + CFLAGS="$SAVE_CFLAGS" + CPPFLAGS="$SAVE_CPPFLAGS" + LDFLAGS="$SAVE_LDFLAGS" if test x"$working_native_cc" = x"no"; then + AC_MSG_RESULT([no]) set_precomp=no + m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.]) if test x"$use_ecmult_static_precomputation" = x"yes"; then - AC_MSG_ERROR([${CC_FOR_BUILD} does not produce working binaries. 
Please set CC_FOR_BUILD]) + AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build]) else - AC_MSG_RESULT([${CC_FOR_BUILD} does not produce working binaries. Please set CC_FOR_BUILD]) + AC_MSG_WARN([Disabling statically generated ecmult table because the native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build]) fi else - AC_MSG_RESULT([ok]) + AC_MSG_RESULT([yes]) set_precomp=yes fi else @@ -366,6 +401,28 @@ case $set_scalar in ;; esac +#set ecmult window size +if test x"$req_ecmult_window" = x"auto"; then + set_ecmult_window=15 +else + set_ecmult_window=$req_ecmult_window +fi + +error_window_size=['window size for ecmult precomputation not an integer in range [2..24] or "auto"'] +case $set_ecmult_window in +''|*[[!0-9]]*) + # no valid integer + AC_MSG_ERROR($error_window_size) + ;; +*) + if test "$set_ecmult_window" -lt 2 -o "$set_ecmult_window" -gt 24 ; then + # not in range + AC_MSG_ERROR($error_window_size) + fi + AC_DEFINE_UNQUOTED(ECMULT_WINDOW_SIZE, $set_ecmult_window, [Set window size for ecmult precomputation]) + ;; +esac + if test x"$use_tests" = x"yes"; then SECP_OPENSSL_CHECK if test x"$has_openssl_ec" = x"yes"; then @@ -441,17 +498,9 @@ if test x"$use_external_asm" = x"yes"; then AC_DEFINE(USE_EXTERNAL_ASM, 1, [Define this symbol if an external (non-inline) assembly implementation is used]) fi -AC_MSG_NOTICE([Using static precomputation: $set_precomp]) -AC_MSG_NOTICE([Using assembly optimizations: $set_asm]) -AC_MSG_NOTICE([Using field implementation: $set_field]) -AC_MSG_NOTICE([Using bignum implementation: $set_bignum]) -AC_MSG_NOTICE([Using scalar implementation: $set_scalar]) -AC_MSG_NOTICE([Using endomorphism optimizations: $use_endomorphism]) -AC_MSG_NOTICE([Building benchmarks: $use_benchmark]) -AC_MSG_NOTICE([Building for coverage analysis: $enable_coverage]) -AC_MSG_NOTICE([Building ECDH module: $enable_module_ecdh]) -AC_MSG_NOTICE([Building ECDSA pubkey recovery module: $enable_module_recovery]) -AC_MSG_NOTICE([Using jni: $use_jni]) +if test x"$use_external_default_callbacks" = x"yes"; then + AC_DEFINE(USE_EXTERNAL_DEFAULT_CALLBACKS, 1, [Define this symbol if an external implementation of the default callbacks is used]) +fi if test x"$enable_experimental" = x"yes"; then AC_MSG_NOTICE([******]) @@ -482,7 +531,7 @@ AM_CONDITIONAL([USE_BENCHMARK], [test x"$use_benchmark" = x"yes"]) AM_CONDITIONAL([USE_ECMULT_STATIC_PRECOMPUTATION], [test x"$set_precomp" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_ECDH], [test x"$enable_module_ecdh" = x"yes"]) AM_CONDITIONAL([ENABLE_MODULE_RECOVERY], [test x"$enable_module_recovery" = x"yes"]) -AM_CONDITIONAL([USE_JNI], [test x"$use_jni" == x"yes"]) +AM_CONDITIONAL([USE_JNI], [test x"$use_jni" = x"yes"]) AM_CONDITIONAL([USE_EXTERNAL_ASM], [test x"$use_external_asm" = x"yes"]) AM_CONDITIONAL([USE_ASM_ARM], [test x"$set_asm" = x"arm"]) @@ -492,3 +541,26 @@ unset PKG_CONFIG_PATH PKG_CONFIG_PATH="$PKGCONFIG_PATH_TEMP" AC_OUTPUT + +echo +echo "Build Options:" +echo " with endomorphism = $use_endomorphism" +echo " with ecmult precomp = $set_precomp" +echo " with external callbacks = $use_external_default_callbacks" +echo " with jni = $use_jni" +echo " with benchmarks = $use_benchmark" +echo " with coverage = $enable_coverage" +echo " module ecdh = $enable_module_ecdh" +echo " module recovery = $enable_module_recovery" +echo +echo " asm = $set_asm" +echo " bignum = $set_bignum" +echo " field = $set_field" +echo " scalar = $set_scalar" +echo " ecmult window size = 
$set_ecmult_window" +echo +echo " CC = $CC" +echo " CFLAGS = $CFLAGS" +echo " CPPFLAGS = $CPPFLAGS" +echo " LDFLAGS = $LDFLAGS" +echo diff --git a/depend/secp256k1/contrib/lax_der_parsing.c b/depend/secp256k1/contrib/lax_der_parsing.c index 5b141a994..e177a0562 100644 --- a/depend/secp256k1/contrib/lax_der_parsing.c +++ b/depend/secp256k1/contrib/lax_der_parsing.c @@ -32,7 +32,7 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_ lenbyte = input[pos++]; if (lenbyte & 0x80) { lenbyte -= 0x80; - if (pos + lenbyte > inputlen) { + if (lenbyte > inputlen - pos) { return 0; } pos += lenbyte; @@ -51,7 +51,7 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_ lenbyte = input[pos++]; if (lenbyte & 0x80) { lenbyte -= 0x80; - if (pos + lenbyte > inputlen) { + if (lenbyte > inputlen - pos) { return 0; } while (lenbyte > 0 && input[pos] == 0) { @@ -89,7 +89,7 @@ int ecdsa_signature_parse_der_lax(const secp256k1_context* ctx, secp256k1_ecdsa_ lenbyte = input[pos++]; if (lenbyte & 0x80) { lenbyte -= 0x80; - if (pos + lenbyte > inputlen) { + if (lenbyte > inputlen - pos) { return 0; } while (lenbyte > 0 && input[pos] == 0) {
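[Editor's note on the lax_der_parsing.c hunks above: all three rewrite `pos + lenbyte > inputlen` as `lenbyte > inputlen - pos`. The parser maintains pos <= inputlen, so the subtraction can never underflow, whereas the sum could in principle wrap around SIZE_MAX and slip past the bounds check. In this parser lenbyte is at most 0x7f, so the change is defensive overflow hygiene rather than a reachable bug. A small sketch of the difference, assuming that invariant:

    #include <stddef.h>
    #include <stdio.h>

    /* Both checks assume the parser invariant pos <= inputlen. */
    static int fits_unsafe(size_t pos, size_t lenbyte, size_t inputlen) {
        return !(pos + lenbyte > inputlen); /* the sum may wrap */
    }
    static int fits_safe(size_t pos, size_t lenbyte, size_t inputlen) {
        return !(lenbyte > inputlen - pos); /* never underflows */
    }

    int main(void) {
        /* lenbyte = SIZE_MAX: pos + lenbyte wraps to 1, so the unsafe
           form accepts a length that the safe form correctly rejects. */
        printf("unsafe: %d, safe: %d\n",
               fits_unsafe(2, (size_t)-1, 4), fits_safe(2, (size_t)-1, 4));
        return 0;
    }

]

diff --git a/depend/secp256k1/include/gmp.h b/depend/secp256k1/include/gmp.h new file mode 100644 index 000000000..0991e2bb4 --- /dev/null +++ b/depend/secp256k1/include/gmp.h @@ -0,0 +1,2329 @@ +/* Definitions for GNU multiple precision functions. -*- mode: c -*- + +Copyright 1991, 1993-1997, 1999-2016 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of either: + + * the GNU Lesser General Public License as published by the Free + Software Foundation; either version 3 of the License, or (at your + option) any later version. + +or + + * the GNU General Public License as published by the Free Software + Foundation; either version 2 of the License, or (at your option) any + later version. + +or both in parallel, as here. + +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +for more details. + +You should have received copies of the GNU General Public License and the +GNU Lesser General Public License along with the GNU MP Library. If not, +see https://www.gnu.org/licenses/. */ + +#ifndef __GMP_H__ + +#if defined (__cplusplus) +#include <iosfwd> /* for std::istream, std::ostream, std::string */ +#include <cstdio> +#endif + + +/* Instantiated by configure. */ +#if ! defined (__GMP_WITHIN_CONFIGURE) +#define __GMP_HAVE_HOST_CPU_FAMILY_power 0 +#define __GMP_HAVE_HOST_CPU_FAMILY_powerpc 0 +#define GMP_LIMB_BITS 32 +#define GMP_NAIL_BITS 0 +#endif +#define GMP_NUMB_BITS (GMP_LIMB_BITS - GMP_NAIL_BITS) +#define GMP_NUMB_MASK ((~ __GMP_CAST (mp_limb_t, 0)) >> GMP_NAIL_BITS) +#define GMP_NUMB_MAX GMP_NUMB_MASK +#define GMP_NAIL_MASK (~ GMP_NUMB_MASK) + + +#ifndef __GNU_MP__ +#define __GNU_MP__ 6 + +#include <stddef.h> /* for size_t */ +#include <limits.h> + +/* Instantiated by configure. */ +#if ! defined (__GMP_WITHIN_CONFIGURE) +/* #undef _LONG_LONG_LIMB */ +#define __GMP_LIBGMP_DLL 1 +#endif + + +/* __GMP_DECLSPEC supports Windows DLL versions of libgmp, and is empty in + all other circumstances. + + When compiling objects for libgmp, __GMP_DECLSPEC is an export directive, + or when compiling for an application it's an import directive.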
The two + cases are differentiated by __GMP_WITHIN_GMP defined by the GMP Makefiles + (and not defined from an application). + + __GMP_DECLSPEC_XX is similarly used for libgmpxx. __GMP_WITHIN_GMPXX + indicates when building libgmpxx, and in that case libgmpxx functions are + exports, but libgmp functions which might get called are imports. + + Libtool DLL_EXPORT define is not used. + + There's no attempt to support GMP built both static and DLL. Doing so + would mean applications would have to tell us which of the two is going + to be used when linking, and that seems very tedious and error prone if + using GMP by hand, and equally tedious from a package since autoconf and + automake don't give much help. + + __GMP_DECLSPEC is required on all documented global functions and + variables, the various internals in gmp-impl.h etc can be left unadorned. + But internals used by the test programs or speed measuring programs + should have __GMP_DECLSPEC, and certainly constants or variables must + have it or the wrong address will be resolved. + + In gcc __declspec can go at either the start or end of a prototype. + + In Microsoft C __declspec must go at the start, or after the type like + void __declspec(...) *foo()". There's no __dllexport or anything to + guard against someone foolish #defining dllexport. _export used to be + available, but no longer. + + In Borland C _export still exists, but needs to go after the type, like + "void _export foo();". Would have to change the __GMP_DECLSPEC syntax to + make use of that. Probably more trouble than it's worth. */ + +#if defined (__GNUC__) +#define __GMP_DECLSPEC_EXPORT __declspec(__dllexport__) +#define __GMP_DECLSPEC_IMPORT __declspec(__dllimport__) +#endif +#if defined (_MSC_VER) || defined (__BORLANDC__) +#define __GMP_DECLSPEC_EXPORT __declspec(dllexport) +#define __GMP_DECLSPEC_IMPORT __declspec(dllimport) +#endif +#ifdef __WATCOMC__ +#define __GMP_DECLSPEC_EXPORT __export +#define __GMP_DECLSPEC_IMPORT __import +#endif +#ifdef __IBMC__ +#define __GMP_DECLSPEC_EXPORT _Export +#define __GMP_DECLSPEC_IMPORT _Import +#endif + +#if __GMP_LIBGMP_DLL +#ifdef __GMP_WITHIN_GMP +/* compiling to go into a DLL libgmp */ +#define __GMP_DECLSPEC __GMP_DECLSPEC_EXPORT +#else +/* compiling to go into an application which will link to a DLL libgmp */ +#define __GMP_DECLSPEC __GMP_DECLSPEC_IMPORT +#endif +#else +/* all other cases */ +#define __GMP_DECLSPEC +#endif + + +#ifdef __GMP_SHORT_LIMB +typedef unsigned int mp_limb_t; +typedef int mp_limb_signed_t; +#else +#ifdef _LONG_LONG_LIMB +typedef unsigned long long int mp_limb_t; +typedef long long int mp_limb_signed_t; +#else +typedef unsigned long int mp_limb_t; +typedef long int mp_limb_signed_t; +#endif +#endif +typedef unsigned long int mp_bitcnt_t; + +/* For reference, note that the name __mpz_struct gets into C++ mangled + function names, which means although the "__" suggests an internal, we + must leave this name for binary compatibility. */ +typedef struct +{ + int _mp_alloc; /* Number of *limbs* allocated and pointed + to by the _mp_d field. */ + int _mp_size; /* abs(_mp_size) is the number of limbs the + last field points to. If _mp_size is + negative this is a negative number. */ + mp_limb_t *_mp_d; /* Pointer to the limbs. */ +} __mpz_struct; + +#endif /* __GNU_MP__ */ + + +typedef __mpz_struct MP_INT; /* gmp 1 source compatibility */ +typedef __mpz_struct mpz_t[1]; + +typedef mp_limb_t * mp_ptr; +typedef const mp_limb_t * mp_srcptr; +#if defined (_CRAY) && ! 
defined (_CRAYMPP) +/* plain `int' is much faster (48 bits) */ +#define __GMP_MP_SIZE_T_INT 1 +typedef int mp_size_t; +typedef int mp_exp_t; +#else +#define __GMP_MP_SIZE_T_INT 0 +typedef long int mp_size_t; +typedef long int mp_exp_t; +#endif + +typedef struct +{ + __mpz_struct _mp_num; + __mpz_struct _mp_den; +} __mpq_struct; + +typedef __mpq_struct MP_RAT; /* gmp 1 source compatibility */ +typedef __mpq_struct mpq_t[1]; + +typedef struct +{ + int _mp_prec; /* Max precision, in number of `mp_limb_t's. Set by mpf_init and modified by mpf_set_prec. The area pointed to by the _mp_d field contains `prec' + 1 limbs. */ + int _mp_size; /* abs(_mp_size) is the number of limbs the last field points to. If _mp_size is negative this is a negative number. */ + mp_exp_t _mp_exp; /* Exponent, in the base of `mp_limb_t'. */ + mp_limb_t *_mp_d; /* Pointer to the limbs. */ +} __mpf_struct; + +/* typedef __mpf_struct MP_FLOAT; */ +typedef __mpf_struct mpf_t[1]; + +/* Available random number generation algorithms. */ +typedef enum +{ + GMP_RAND_ALG_DEFAULT = 0, + GMP_RAND_ALG_LC = GMP_RAND_ALG_DEFAULT /* Linear congruential. */ +} gmp_randalg_t; + +/* Random state struct. */ +typedef struct +{ + mpz_t _mp_seed; /* _mp_d member points to state of the generator. */ + gmp_randalg_t _mp_alg; /* Currently unused. */ + union { + void *_mp_lc; /* Pointer to function pointers structure. */ + } _mp_algdata; +} __gmp_randstate_struct; +typedef __gmp_randstate_struct gmp_randstate_t[1]; + +/* Types for function declarations in gmp files. */ +/* ??? Should not pollute user name space with these ??? */ +typedef const __mpz_struct *mpz_srcptr; +typedef __mpz_struct *mpz_ptr; +typedef const __mpf_struct *mpf_srcptr; +typedef __mpf_struct *mpf_ptr; +typedef const __mpq_struct *mpq_srcptr; +typedef __mpq_struct *mpq_ptr; + + +#if __GMP_LIBGMP_DLL +#ifdef __GMP_WITHIN_GMPXX +/* compiling to go into a DLL libgmpxx */ +#define __GMP_DECLSPEC_XX __GMP_DECLSPEC_EXPORT +#else +/* compiling to go into an application which will link to a DLL libgmpxx */ +#define __GMP_DECLSPEC_XX __GMP_DECLSPEC_IMPORT +#endif +#else +/* all other cases */ +#define __GMP_DECLSPEC_XX +#endif + + +#ifndef __MPN +#define __MPN(x) __gmpn_##x +#endif + +/* For reference, "defined(EOF)" cannot be used here. In g++ 2.95.4, <iostream> defines EOF but not FILE. */ +#if defined (FILE) \ + || defined (H_STDIO) \ + || defined (_H_STDIO) /* AIX */ \ + || defined (_STDIO_H) /* glibc, Sun, SCO */ \ + || defined (_STDIO_H_) /* BSD, OSF */ \ + || defined (__STDIO_H) /* Borland */ \ + || defined (__STDIO_H__) /* IRIX */ \ + || defined (_STDIO_INCLUDED) /* HPUX */ \ + || defined (__dj_include_stdio_h_) /* DJGPP */ \ + || defined (_FILE_DEFINED) /* Microsoft */ \ + || defined (__STDIO__) /* Apple MPW MrC */ \ + || defined (_MSL_STDIO_H) /* Metrowerks */ \ + || defined (_STDIO_H_INCLUDED) /* QNX4 */ \ + || defined (_ISO_STDIO_ISO_H) /* Sun C++ */ \ + || defined (__STDIO_LOADED) /* VMS */ \ + || defined (__DEFINED_FILE) /* musl */ +#define _GMP_H_HAVE_FILE 1 +#endif + +/* In ISO C, if a prototype involving "struct obstack *" is given without + that structure defined, then the struct is scoped down to just the + prototype, causing a conflict if it's subsequently defined for real. So + only give prototypes if we've got obstack.h. */ +#if defined (_OBSTACK_H) /* glibc */ +#define _GMP_H_HAVE_OBSTACK 1 +#endif + +/* The prototypes for gmp_vprintf etc are provided only if va_list is defined, + via an application having included <stdarg.h>. Usually va_list is a typedef + so can't be tested directly, but C99 specifies that va_start is a macro. + + <stdio.h> will define some sort of va_list for vprintf and vfprintf, but + let's not bother trying to use that since it's not standard and since + application uses for gmp_vprintf etc will almost certainly require the + whole <stdarg.h> anyway. */ + +#ifdef va_start +#define _GMP_H_HAVE_VA_LIST 1 +#endif + +/* Test for gcc >= maj.min, as per __GNUC_PREREQ in glibc */ +#if defined (__GNUC__) && defined (__GNUC_MINOR__) +#define __GMP_GNUC_PREREQ(maj, min) \ + ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min)) +#else +#define __GMP_GNUC_PREREQ(maj, min) 0 +#endif + +/* "pure" is in gcc 2.96 and up, see "(gcc)Function Attributes". Basically + it means a function does nothing but examine its arguments and memory + (global or via arguments) to generate a return value, but changes nothing + and has no side-effects. __GMP_NO_ATTRIBUTE_CONST_PURE lets + tune/common.c etc turn this off when trying to write timing loops. */ +#if __GMP_GNUC_PREREQ (2,96) && ! defined (__GMP_NO_ATTRIBUTE_CONST_PURE) +#define __GMP_ATTRIBUTE_PURE __attribute__ ((__pure__)) +#else +#define __GMP_ATTRIBUTE_PURE +#endif + + +/* __GMP_CAST allows us to use static_cast in C++, so our macros are clean + to "g++ -Wold-style-cast". + + Casts in "extern inline" code within an extern "C" block don't induce + these warnings, so __GMP_CAST only needs to be used on documented + macros. */ + +#ifdef __cplusplus +#define __GMP_CAST(type, expr) (static_cast<type> (expr)) +#else +#define __GMP_CAST(type, expr) ((type) (expr)) +#endif + + +/* An empty "throw ()" means the function doesn't throw any C++ exceptions, + this can save some stack frame info in applications. + + Currently it's given only on functions which never divide-by-zero etc, + don't allocate memory, and are expected to never need to allocate memory. + This leaves open the possibility of a C++ throw from a future GMP + exceptions scheme. + + mpz_set_ui etc are omitted to leave open the lazy allocation scheme + described in doc/tasks.html. mpz_get_d etc are omitted to leave open + exceptions for float overflows. + + Note that __GMP_NOTHROW must be given on any inlines the same as on their + prototypes (for g++ at least, where they're used together). Note also + that g++ 3.0 demands that __GMP_NOTHROW is before other attributes like + __GMP_ATTRIBUTE_PURE. */ + +#if defined (__cplusplus) +#define __GMP_NOTHROW throw () +#else +#define __GMP_NOTHROW +#endif + + +/* PORTME: What other compilers have a useful "extern inline"? "static + inline" would be an acceptable substitute if the compiler (or linker) + discards unused statics. */ + + /* gcc has __inline__ in all modes, including strict ansi. Give a prototype + for an inline too, so as to correctly specify "dllimport" on windows, in + case the function is called rather than inlined. + GCC 4.3 and above with -std=c99 or -std=gnu99 implements ISO C99 + inline semantics, unless -fgnu89-inline is used. */ +#ifdef __GNUC__ +#if (defined __GNUC_STDC_INLINE__) || (__GNUC__ == 4 && __GNUC_MINOR__ == 2) \ + || (defined __GNUC_GNU_INLINE__ && defined __cplusplus) +#define __GMP_EXTERN_INLINE extern __inline__ __attribute__ ((__gnu_inline__)) +#else +#define __GMP_EXTERN_INLINE extern __inline__ +#endif +#define __GMP_INLINE_PROTOTYPES 1 +#endif + +/* DEC C (eg. version 5.9) supports "static __inline foo()", even in -std1 + strict ANSI mode. Inlining is done even when not optimizing (ie.
-O0 + mode, which is the default), but an unnecessary local copy of foo is + emitted unless -O is used. "extern __inline" is accepted, but the + "extern" appears to be ignored, ie. it becomes a plain global function + but which is inlined within its file. Don't know if all old versions of + DEC C supported __inline, but as a start let's do the right thing for + current versions. */ +#ifdef __DECC +#define __GMP_EXTERN_INLINE static __inline +#endif + +/* SCO OpenUNIX 8 cc supports "static inline foo()" but not in -Xc strict + ANSI mode (__STDC__ is 1 in that mode). Inlining only actually takes + place under -O. Without -O "foo" seems to be emitted whether it's used + or not, which is wasteful. "extern inline foo()" isn't useful, the + "extern" is apparently ignored, so foo is inlined if possible but also + emitted as a global, which causes multiple definition errors when + building a shared libgmp. */ +#ifdef __SCO_VERSION__ +#if __SCO_VERSION__ > 400000000 && __STDC__ != 1 \ + && ! defined (__GMP_EXTERN_INLINE) +#define __GMP_EXTERN_INLINE static inline +#endif +#endif + +/* Microsoft's C compiler accepts __inline */ +#ifdef _MSC_VER +#define __GMP_EXTERN_INLINE __inline +#endif + +/* Recent enough Sun C compilers want "inline" */ +#if defined (__SUNPRO_C) && __SUNPRO_C >= 0x560 \ + && ! defined (__GMP_EXTERN_INLINE) +#define __GMP_EXTERN_INLINE inline +#endif + +/* Somewhat older Sun C compilers want "static inline" */ +#if defined (__SUNPRO_C) && __SUNPRO_C >= 0x540 \ + && ! defined (__GMP_EXTERN_INLINE) +#define __GMP_EXTERN_INLINE static inline +#endif + + +/* C++ always has "inline" and since it's a normal feature the linker should + discard duplicate non-inlined copies, or if it doesn't then that's a + problem for everyone, not just GMP. */ +#if defined (__cplusplus) && ! defined (__GMP_EXTERN_INLINE) +#define __GMP_EXTERN_INLINE inline +#endif + +/* Don't do any inlining within a configure run, since if the compiler ends + up emitting copies of the code into the object file it can end up + demanding the various support routines (like mpn_popcount) for linking, + making the "alloca" test and perhaps others fail. And on hppa ia64 a + pre-release gcc 3.2 was seen not respecting the "extern" in "extern + __inline__", triggering this problem too. */ +#if defined (__GMP_WITHIN_CONFIGURE) && ! __GMP_WITHIN_CONFIGURE_INLINE +#undef __GMP_EXTERN_INLINE +#endif + +/* By default, don't give a prototype when there's going to be an inline + version. Note in particular that Cray C++ objects to the combination of + prototype and inline. */ +#ifdef __GMP_EXTERN_INLINE +#ifndef __GMP_INLINE_PROTOTYPES +#define __GMP_INLINE_PROTOTYPES 0 +#endif +#else +#define __GMP_INLINE_PROTOTYPES 1 +#endif + + +#define __GMP_ABS(x) ((x) >= 0 ? (x) : -(x)) +#define __GMP_MAX(h,i) ((h) > (i) ? (h) : (i)) + + +/* __builtin_expect is in gcc 3.0, and not in 2.95. */ +#if __GMP_GNUC_PREREQ (3,0) +#define __GMP_LIKELY(cond) __builtin_expect ((cond) != 0, 1) +#define __GMP_UNLIKELY(cond) __builtin_expect ((cond) != 0, 0) +#else +#define __GMP_LIKELY(cond) (cond) +#define __GMP_UNLIKELY(cond) (cond) +#endif + +#ifdef _CRAY +#define __GMP_CRAY_Pragma(str) _Pragma (str) +#else +#define __GMP_CRAY_Pragma(str) +#endif + + +/* Allow direct user access to numerator and denominator of an mpq_t object. 
*/ +#define mpq_numref(Q) (&((Q)->_mp_num)) +#define mpq_denref(Q) (&((Q)->_mp_den)) + + +#if defined (__cplusplus) +extern "C" { +using std::FILE; +#endif + +#define mp_set_memory_functions __gmp_set_memory_functions +__GMP_DECLSPEC void mp_set_memory_functions (void *(*) (size_t), + void *(*) (void *, size_t, size_t), + void (*) (void *, size_t)) __GMP_NOTHROW; + +#define mp_get_memory_functions __gmp_get_memory_functions +__GMP_DECLSPEC void mp_get_memory_functions (void *(**) (size_t), + void *(**) (void *, size_t, size_t), + void (**) (void *, size_t)) __GMP_NOTHROW; + +#define mp_bits_per_limb __gmp_bits_per_limb +__GMP_DECLSPEC extern const int mp_bits_per_limb; + +#define gmp_errno __gmp_errno +__GMP_DECLSPEC extern int gmp_errno; + +#define gmp_version __gmp_version +__GMP_DECLSPEC extern const char * const gmp_version; + + +/**************** Random number routines. ****************/ + +/* obsolete */ +#define gmp_randinit __gmp_randinit +__GMP_DECLSPEC void gmp_randinit (gmp_randstate_t, gmp_randalg_t, ...); + +#define gmp_randinit_default __gmp_randinit_default +__GMP_DECLSPEC void gmp_randinit_default (gmp_randstate_t); + +#define gmp_randinit_lc_2exp __gmp_randinit_lc_2exp +__GMP_DECLSPEC void gmp_randinit_lc_2exp (gmp_randstate_t, mpz_srcptr, unsigned long int, mp_bitcnt_t); + +#define gmp_randinit_lc_2exp_size __gmp_randinit_lc_2exp_size +__GMP_DECLSPEC int gmp_randinit_lc_2exp_size (gmp_randstate_t, mp_bitcnt_t); + +#define gmp_randinit_mt __gmp_randinit_mt +__GMP_DECLSPEC void gmp_randinit_mt (gmp_randstate_t); + +#define gmp_randinit_set __gmp_randinit_set +__GMP_DECLSPEC void gmp_randinit_set (gmp_randstate_t, const __gmp_randstate_struct *); + +#define gmp_randseed __gmp_randseed +__GMP_DECLSPEC void gmp_randseed (gmp_randstate_t, mpz_srcptr); + +#define gmp_randseed_ui __gmp_randseed_ui +__GMP_DECLSPEC void gmp_randseed_ui (gmp_randstate_t, unsigned long int); + +#define gmp_randclear __gmp_randclear +__GMP_DECLSPEC void gmp_randclear (gmp_randstate_t); + +#define gmp_urandomb_ui __gmp_urandomb_ui +__GMP_DECLSPEC unsigned long gmp_urandomb_ui (gmp_randstate_t, unsigned long); + +#define gmp_urandomm_ui __gmp_urandomm_ui +__GMP_DECLSPEC unsigned long gmp_urandomm_ui (gmp_randstate_t, unsigned long); + + +/**************** Formatted output routines. 
****************/ + +#define gmp_asprintf __gmp_asprintf +__GMP_DECLSPEC int gmp_asprintf (char **, const char *, ...); + +#define gmp_fprintf __gmp_fprintf +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC int gmp_fprintf (FILE *, const char *, ...); +#endif + +#define gmp_obstack_printf __gmp_obstack_printf +#if defined (_GMP_H_HAVE_OBSTACK) +__GMP_DECLSPEC int gmp_obstack_printf (struct obstack *, const char *, ...); +#endif + +#define gmp_obstack_vprintf __gmp_obstack_vprintf +#if defined (_GMP_H_HAVE_OBSTACK) && defined (_GMP_H_HAVE_VA_LIST) +__GMP_DECLSPEC int gmp_obstack_vprintf (struct obstack *, const char *, va_list); +#endif + +#define gmp_printf __gmp_printf +__GMP_DECLSPEC int gmp_printf (const char *, ...); + +#define gmp_snprintf __gmp_snprintf +__GMP_DECLSPEC int gmp_snprintf (char *, size_t, const char *, ...); + +#define gmp_sprintf __gmp_sprintf +__GMP_DECLSPEC int gmp_sprintf (char *, const char *, ...); + +#define gmp_vasprintf __gmp_vasprintf +#if defined (_GMP_H_HAVE_VA_LIST) +__GMP_DECLSPEC int gmp_vasprintf (char **, const char *, va_list); +#endif + +#define gmp_vfprintf __gmp_vfprintf +#if defined (_GMP_H_HAVE_FILE) && defined (_GMP_H_HAVE_VA_LIST) +__GMP_DECLSPEC int gmp_vfprintf (FILE *, const char *, va_list); +#endif + +#define gmp_vprintf __gmp_vprintf +#if defined (_GMP_H_HAVE_VA_LIST) +__GMP_DECLSPEC int gmp_vprintf (const char *, va_list); +#endif + +#define gmp_vsnprintf __gmp_vsnprintf +#if defined (_GMP_H_HAVE_VA_LIST) +__GMP_DECLSPEC int gmp_vsnprintf (char *, size_t, const char *, va_list); +#endif + +#define gmp_vsprintf __gmp_vsprintf +#if defined (_GMP_H_HAVE_VA_LIST) +__GMP_DECLSPEC int gmp_vsprintf (char *, const char *, va_list); +#endif + + +/**************** Formatted input routines. ****************/ + +#define gmp_fscanf __gmp_fscanf +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC int gmp_fscanf (FILE *, const char *, ...); +#endif + +#define gmp_scanf __gmp_scanf +__GMP_DECLSPEC int gmp_scanf (const char *, ...); + +#define gmp_sscanf __gmp_sscanf +__GMP_DECLSPEC int gmp_sscanf (const char *, const char *, ...); + +#define gmp_vfscanf __gmp_vfscanf +#if defined (_GMP_H_HAVE_FILE) && defined (_GMP_H_HAVE_VA_LIST) +__GMP_DECLSPEC int gmp_vfscanf (FILE *, const char *, va_list); +#endif + +#define gmp_vscanf __gmp_vscanf +#if defined (_GMP_H_HAVE_VA_LIST) +__GMP_DECLSPEC int gmp_vscanf (const char *, va_list); +#endif + +#define gmp_vsscanf __gmp_vsscanf +#if defined (_GMP_H_HAVE_VA_LIST) +__GMP_DECLSPEC int gmp_vsscanf (const char *, const char *, va_list); +#endif + + +/**************** Integer (i.e. Z) routines. 
****************/ + +#define _mpz_realloc __gmpz_realloc +#define mpz_realloc __gmpz_realloc +__GMP_DECLSPEC void *_mpz_realloc (mpz_ptr, mp_size_t); + +#define mpz_abs __gmpz_abs +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_abs) +__GMP_DECLSPEC void mpz_abs (mpz_ptr, mpz_srcptr); +#endif + +#define mpz_add __gmpz_add +__GMP_DECLSPEC void mpz_add (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_add_ui __gmpz_add_ui +__GMP_DECLSPEC void mpz_add_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_addmul __gmpz_addmul +__GMP_DECLSPEC void mpz_addmul (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_addmul_ui __gmpz_addmul_ui +__GMP_DECLSPEC void mpz_addmul_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_and __gmpz_and +__GMP_DECLSPEC void mpz_and (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_array_init __gmpz_array_init +__GMP_DECLSPEC void mpz_array_init (mpz_ptr, mp_size_t, mp_size_t); + +#define mpz_bin_ui __gmpz_bin_ui +__GMP_DECLSPEC void mpz_bin_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_bin_uiui __gmpz_bin_uiui +__GMP_DECLSPEC void mpz_bin_uiui (mpz_ptr, unsigned long int, unsigned long int); + +#define mpz_cdiv_q __gmpz_cdiv_q +__GMP_DECLSPEC void mpz_cdiv_q (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_cdiv_q_2exp __gmpz_cdiv_q_2exp +__GMP_DECLSPEC void mpz_cdiv_q_2exp (mpz_ptr, mpz_srcptr, mp_bitcnt_t); + +#define mpz_cdiv_q_ui __gmpz_cdiv_q_ui +__GMP_DECLSPEC unsigned long int mpz_cdiv_q_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_cdiv_qr __gmpz_cdiv_qr +__GMP_DECLSPEC void mpz_cdiv_qr (mpz_ptr, mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_cdiv_qr_ui __gmpz_cdiv_qr_ui +__GMP_DECLSPEC unsigned long int mpz_cdiv_qr_ui (mpz_ptr, mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_cdiv_r __gmpz_cdiv_r +__GMP_DECLSPEC void mpz_cdiv_r (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_cdiv_r_2exp __gmpz_cdiv_r_2exp +__GMP_DECLSPEC void mpz_cdiv_r_2exp (mpz_ptr, mpz_srcptr, mp_bitcnt_t); + +#define mpz_cdiv_r_ui __gmpz_cdiv_r_ui +__GMP_DECLSPEC unsigned long int mpz_cdiv_r_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_cdiv_ui __gmpz_cdiv_ui +__GMP_DECLSPEC unsigned long int mpz_cdiv_ui (mpz_srcptr, unsigned long int) __GMP_ATTRIBUTE_PURE; + +#define mpz_clear __gmpz_clear +__GMP_DECLSPEC void mpz_clear (mpz_ptr); + +#define mpz_clears __gmpz_clears +__GMP_DECLSPEC void mpz_clears (mpz_ptr, ...); + +#define mpz_clrbit __gmpz_clrbit +__GMP_DECLSPEC void mpz_clrbit (mpz_ptr, mp_bitcnt_t); + +#define mpz_cmp __gmpz_cmp +__GMP_DECLSPEC int mpz_cmp (mpz_srcptr, mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_cmp_d __gmpz_cmp_d +__GMP_DECLSPEC int mpz_cmp_d (mpz_srcptr, double) __GMP_ATTRIBUTE_PURE; + +#define _mpz_cmp_si __gmpz_cmp_si +__GMP_DECLSPEC int _mpz_cmp_si (mpz_srcptr, signed long int) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define _mpz_cmp_ui __gmpz_cmp_ui +__GMP_DECLSPEC int _mpz_cmp_ui (mpz_srcptr, unsigned long int) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_cmpabs __gmpz_cmpabs +__GMP_DECLSPEC int mpz_cmpabs (mpz_srcptr, mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_cmpabs_d __gmpz_cmpabs_d +__GMP_DECLSPEC int mpz_cmpabs_d (mpz_srcptr, double) __GMP_ATTRIBUTE_PURE; + +#define mpz_cmpabs_ui __gmpz_cmpabs_ui +__GMP_DECLSPEC int mpz_cmpabs_ui (mpz_srcptr, unsigned long int) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_com __gmpz_com +__GMP_DECLSPEC void mpz_com (mpz_ptr, mpz_srcptr); + +#define mpz_combit __gmpz_combit +__GMP_DECLSPEC void 
mpz_combit (mpz_ptr, mp_bitcnt_t); + +#define mpz_congruent_p __gmpz_congruent_p +__GMP_DECLSPEC int mpz_congruent_p (mpz_srcptr, mpz_srcptr, mpz_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpz_congruent_2exp_p __gmpz_congruent_2exp_p +__GMP_DECLSPEC int mpz_congruent_2exp_p (mpz_srcptr, mpz_srcptr, mp_bitcnt_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_congruent_ui_p __gmpz_congruent_ui_p +__GMP_DECLSPEC int mpz_congruent_ui_p (mpz_srcptr, unsigned long, unsigned long) __GMP_ATTRIBUTE_PURE; + +#define mpz_divexact __gmpz_divexact +__GMP_DECLSPEC void mpz_divexact (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_divexact_ui __gmpz_divexact_ui +__GMP_DECLSPEC void mpz_divexact_ui (mpz_ptr, mpz_srcptr, unsigned long); + +#define mpz_divisible_p __gmpz_divisible_p +__GMP_DECLSPEC int mpz_divisible_p (mpz_srcptr, mpz_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpz_divisible_ui_p __gmpz_divisible_ui_p +__GMP_DECLSPEC int mpz_divisible_ui_p (mpz_srcptr, unsigned long) __GMP_ATTRIBUTE_PURE; + +#define mpz_divisible_2exp_p __gmpz_divisible_2exp_p +__GMP_DECLSPEC int mpz_divisible_2exp_p (mpz_srcptr, mp_bitcnt_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_dump __gmpz_dump +__GMP_DECLSPEC void mpz_dump (mpz_srcptr); + +#define mpz_export __gmpz_export +__GMP_DECLSPEC void *mpz_export (void *, size_t *, int, size_t, int, size_t, mpz_srcptr); + +#define mpz_fac_ui __gmpz_fac_ui +__GMP_DECLSPEC void mpz_fac_ui (mpz_ptr, unsigned long int); + +#define mpz_2fac_ui __gmpz_2fac_ui +__GMP_DECLSPEC void mpz_2fac_ui (mpz_ptr, unsigned long int); + +#define mpz_mfac_uiui __gmpz_mfac_uiui +__GMP_DECLSPEC void mpz_mfac_uiui (mpz_ptr, unsigned long int, unsigned long int); + +#define mpz_primorial_ui __gmpz_primorial_ui +__GMP_DECLSPEC void mpz_primorial_ui (mpz_ptr, unsigned long int); + +#define mpz_fdiv_q __gmpz_fdiv_q +__GMP_DECLSPEC void mpz_fdiv_q (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_fdiv_q_2exp __gmpz_fdiv_q_2exp +__GMP_DECLSPEC void mpz_fdiv_q_2exp (mpz_ptr, mpz_srcptr, mp_bitcnt_t); + +#define mpz_fdiv_q_ui __gmpz_fdiv_q_ui +__GMP_DECLSPEC unsigned long int mpz_fdiv_q_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_fdiv_qr __gmpz_fdiv_qr +__GMP_DECLSPEC void mpz_fdiv_qr (mpz_ptr, mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_fdiv_qr_ui __gmpz_fdiv_qr_ui +__GMP_DECLSPEC unsigned long int mpz_fdiv_qr_ui (mpz_ptr, mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_fdiv_r __gmpz_fdiv_r +__GMP_DECLSPEC void mpz_fdiv_r (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_fdiv_r_2exp __gmpz_fdiv_r_2exp +__GMP_DECLSPEC void mpz_fdiv_r_2exp (mpz_ptr, mpz_srcptr, mp_bitcnt_t); + +#define mpz_fdiv_r_ui __gmpz_fdiv_r_ui +__GMP_DECLSPEC unsigned long int mpz_fdiv_r_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_fdiv_ui __gmpz_fdiv_ui +__GMP_DECLSPEC unsigned long int mpz_fdiv_ui (mpz_srcptr, unsigned long int) __GMP_ATTRIBUTE_PURE; + +#define mpz_fib_ui __gmpz_fib_ui +__GMP_DECLSPEC void mpz_fib_ui (mpz_ptr, unsigned long int); + +#define mpz_fib2_ui __gmpz_fib2_ui +__GMP_DECLSPEC void mpz_fib2_ui (mpz_ptr, mpz_ptr, unsigned long int); + +#define mpz_fits_sint_p __gmpz_fits_sint_p +__GMP_DECLSPEC int mpz_fits_sint_p (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_fits_slong_p __gmpz_fits_slong_p +__GMP_DECLSPEC int mpz_fits_slong_p (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_fits_sshort_p __gmpz_fits_sshort_p +__GMP_DECLSPEC int mpz_fits_sshort_p (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_fits_uint_p 
__gmpz_fits_uint_p +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_fits_uint_p) +__GMP_DECLSPEC int mpz_fits_uint_p (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; +#endif + +#define mpz_fits_ulong_p __gmpz_fits_ulong_p +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_fits_ulong_p) +__GMP_DECLSPEC int mpz_fits_ulong_p (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; +#endif + +#define mpz_fits_ushort_p __gmpz_fits_ushort_p +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_fits_ushort_p) +__GMP_DECLSPEC int mpz_fits_ushort_p (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; +#endif + +#define mpz_gcd __gmpz_gcd +__GMP_DECLSPEC void mpz_gcd (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_gcd_ui __gmpz_gcd_ui +__GMP_DECLSPEC unsigned long int mpz_gcd_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_gcdext __gmpz_gcdext +__GMP_DECLSPEC void mpz_gcdext (mpz_ptr, mpz_ptr, mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_get_d __gmpz_get_d +__GMP_DECLSPEC double mpz_get_d (mpz_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpz_get_d_2exp __gmpz_get_d_2exp +__GMP_DECLSPEC double mpz_get_d_2exp (signed long int *, mpz_srcptr); + +#define mpz_get_si __gmpz_get_si +__GMP_DECLSPEC /* signed */ long int mpz_get_si (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_get_str __gmpz_get_str +__GMP_DECLSPEC char *mpz_get_str (char *, int, mpz_srcptr); + +#define mpz_get_ui __gmpz_get_ui +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_get_ui) +__GMP_DECLSPEC unsigned long int mpz_get_ui (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; +#endif + +#define mpz_getlimbn __gmpz_getlimbn +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_getlimbn) +__GMP_DECLSPEC mp_limb_t mpz_getlimbn (mpz_srcptr, mp_size_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; +#endif + +#define mpz_hamdist __gmpz_hamdist +__GMP_DECLSPEC mp_bitcnt_t mpz_hamdist (mpz_srcptr, mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_import __gmpz_import +__GMP_DECLSPEC void mpz_import (mpz_ptr, size_t, int, size_t, int, size_t, const void *); + +#define mpz_init __gmpz_init +__GMP_DECLSPEC void mpz_init (mpz_ptr); + +#define mpz_init2 __gmpz_init2 +__GMP_DECLSPEC void mpz_init2 (mpz_ptr, mp_bitcnt_t); + +#define mpz_inits __gmpz_inits +__GMP_DECLSPEC void mpz_inits (mpz_ptr, ...); + +#define mpz_init_set __gmpz_init_set +__GMP_DECLSPEC void mpz_init_set (mpz_ptr, mpz_srcptr); + +#define mpz_init_set_d __gmpz_init_set_d +__GMP_DECLSPEC void mpz_init_set_d (mpz_ptr, double); + +#define mpz_init_set_si __gmpz_init_set_si +__GMP_DECLSPEC void mpz_init_set_si (mpz_ptr, signed long int); + +#define mpz_init_set_str __gmpz_init_set_str +__GMP_DECLSPEC int mpz_init_set_str (mpz_ptr, const char *, int); + +#define mpz_init_set_ui __gmpz_init_set_ui +__GMP_DECLSPEC void mpz_init_set_ui (mpz_ptr, unsigned long int); + +#define mpz_inp_raw __gmpz_inp_raw +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC size_t mpz_inp_raw (mpz_ptr, FILE *); +#endif + +#define mpz_inp_str __gmpz_inp_str +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC size_t mpz_inp_str (mpz_ptr, FILE *, int); +#endif + +#define mpz_invert __gmpz_invert +__GMP_DECLSPEC int mpz_invert (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_ior __gmpz_ior +__GMP_DECLSPEC void mpz_ior (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_jacobi __gmpz_jacobi +__GMP_DECLSPEC int mpz_jacobi (mpz_srcptr, mpz_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpz_kronecker mpz_jacobi /* alias */ + +#define mpz_kronecker_si __gmpz_kronecker_si +__GMP_DECLSPEC int 
mpz_kronecker_si (mpz_srcptr, long) __GMP_ATTRIBUTE_PURE; + +#define mpz_kronecker_ui __gmpz_kronecker_ui +__GMP_DECLSPEC int mpz_kronecker_ui (mpz_srcptr, unsigned long) __GMP_ATTRIBUTE_PURE; + +#define mpz_si_kronecker __gmpz_si_kronecker +__GMP_DECLSPEC int mpz_si_kronecker (long, mpz_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpz_ui_kronecker __gmpz_ui_kronecker +__GMP_DECLSPEC int mpz_ui_kronecker (unsigned long, mpz_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpz_lcm __gmpz_lcm +__GMP_DECLSPEC void mpz_lcm (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_lcm_ui __gmpz_lcm_ui +__GMP_DECLSPEC void mpz_lcm_ui (mpz_ptr, mpz_srcptr, unsigned long); + +#define mpz_legendre mpz_jacobi /* alias */ + +#define mpz_lucnum_ui __gmpz_lucnum_ui +__GMP_DECLSPEC void mpz_lucnum_ui (mpz_ptr, unsigned long int); + +#define mpz_lucnum2_ui __gmpz_lucnum2_ui +__GMP_DECLSPEC void mpz_lucnum2_ui (mpz_ptr, mpz_ptr, unsigned long int); + +#define mpz_millerrabin __gmpz_millerrabin +__GMP_DECLSPEC int mpz_millerrabin (mpz_srcptr, int) __GMP_ATTRIBUTE_PURE; + +#define mpz_mod __gmpz_mod +__GMP_DECLSPEC void mpz_mod (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_mod_ui mpz_fdiv_r_ui /* same as fdiv_r because divisor unsigned */ + +#define mpz_mul __gmpz_mul +__GMP_DECLSPEC void mpz_mul (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_mul_2exp __gmpz_mul_2exp +__GMP_DECLSPEC void mpz_mul_2exp (mpz_ptr, mpz_srcptr, mp_bitcnt_t); + +#define mpz_mul_si __gmpz_mul_si +__GMP_DECLSPEC void mpz_mul_si (mpz_ptr, mpz_srcptr, long int); + +#define mpz_mul_ui __gmpz_mul_ui +__GMP_DECLSPEC void mpz_mul_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_neg __gmpz_neg +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_neg) +__GMP_DECLSPEC void mpz_neg (mpz_ptr, mpz_srcptr); +#endif + +#define mpz_nextprime __gmpz_nextprime +__GMP_DECLSPEC void mpz_nextprime (mpz_ptr, mpz_srcptr); + +#define mpz_out_raw __gmpz_out_raw +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC size_t mpz_out_raw (FILE *, mpz_srcptr); +#endif + +#define mpz_out_str __gmpz_out_str +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC size_t mpz_out_str (FILE *, int, mpz_srcptr); +#endif + +#define mpz_perfect_power_p __gmpz_perfect_power_p +__GMP_DECLSPEC int mpz_perfect_power_p (mpz_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpz_perfect_square_p __gmpz_perfect_square_p +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_perfect_square_p) +__GMP_DECLSPEC int mpz_perfect_square_p (mpz_srcptr) __GMP_ATTRIBUTE_PURE; +#endif + +#define mpz_popcount __gmpz_popcount +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_popcount) +__GMP_DECLSPEC mp_bitcnt_t mpz_popcount (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; +#endif + +#define mpz_pow_ui __gmpz_pow_ui +__GMP_DECLSPEC void mpz_pow_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_powm __gmpz_powm +__GMP_DECLSPEC void mpz_powm (mpz_ptr, mpz_srcptr, mpz_srcptr, mpz_srcptr); + +#define mpz_powm_sec __gmpz_powm_sec +__GMP_DECLSPEC void mpz_powm_sec (mpz_ptr, mpz_srcptr, mpz_srcptr, mpz_srcptr); + +#define mpz_powm_ui __gmpz_powm_ui +__GMP_DECLSPEC void mpz_powm_ui (mpz_ptr, mpz_srcptr, unsigned long int, mpz_srcptr); + +#define mpz_probab_prime_p __gmpz_probab_prime_p +__GMP_DECLSPEC int mpz_probab_prime_p (mpz_srcptr, int) __GMP_ATTRIBUTE_PURE; + +#define mpz_random __gmpz_random +__GMP_DECLSPEC void mpz_random (mpz_ptr, mp_size_t); + +#define mpz_random2 __gmpz_random2 +__GMP_DECLSPEC void mpz_random2 (mpz_ptr, mp_size_t); + +#define mpz_realloc2 __gmpz_realloc2 +__GMP_DECLSPEC void mpz_realloc2 
(mpz_ptr, mp_bitcnt_t); + +#define mpz_remove __gmpz_remove +__GMP_DECLSPEC mp_bitcnt_t mpz_remove (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_root __gmpz_root +__GMP_DECLSPEC int mpz_root (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_rootrem __gmpz_rootrem +__GMP_DECLSPEC void mpz_rootrem (mpz_ptr, mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_rrandomb __gmpz_rrandomb +__GMP_DECLSPEC void mpz_rrandomb (mpz_ptr, gmp_randstate_t, mp_bitcnt_t); + +#define mpz_scan0 __gmpz_scan0 +__GMP_DECLSPEC mp_bitcnt_t mpz_scan0 (mpz_srcptr, mp_bitcnt_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_scan1 __gmpz_scan1 +__GMP_DECLSPEC mp_bitcnt_t mpz_scan1 (mpz_srcptr, mp_bitcnt_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_set __gmpz_set +__GMP_DECLSPEC void mpz_set (mpz_ptr, mpz_srcptr); + +#define mpz_set_d __gmpz_set_d +__GMP_DECLSPEC void mpz_set_d (mpz_ptr, double); + +#define mpz_set_f __gmpz_set_f +__GMP_DECLSPEC void mpz_set_f (mpz_ptr, mpf_srcptr); + +#define mpz_set_q __gmpz_set_q +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_set_q) +__GMP_DECLSPEC void mpz_set_q (mpz_ptr, mpq_srcptr); +#endif + +#define mpz_set_si __gmpz_set_si +__GMP_DECLSPEC void mpz_set_si (mpz_ptr, signed long int); + +#define mpz_set_str __gmpz_set_str +__GMP_DECLSPEC int mpz_set_str (mpz_ptr, const char *, int); + +#define mpz_set_ui __gmpz_set_ui +__GMP_DECLSPEC void mpz_set_ui (mpz_ptr, unsigned long int); + +#define mpz_setbit __gmpz_setbit +__GMP_DECLSPEC void mpz_setbit (mpz_ptr, mp_bitcnt_t); + +#define mpz_size __gmpz_size +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpz_size) +__GMP_DECLSPEC size_t mpz_size (mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; +#endif + +#define mpz_sizeinbase __gmpz_sizeinbase +__GMP_DECLSPEC size_t mpz_sizeinbase (mpz_srcptr, int) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_sqrt __gmpz_sqrt +__GMP_DECLSPEC void mpz_sqrt (mpz_ptr, mpz_srcptr); + +#define mpz_sqrtrem __gmpz_sqrtrem +__GMP_DECLSPEC void mpz_sqrtrem (mpz_ptr, mpz_ptr, mpz_srcptr); + +#define mpz_sub __gmpz_sub +__GMP_DECLSPEC void mpz_sub (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_sub_ui __gmpz_sub_ui +__GMP_DECLSPEC void mpz_sub_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_ui_sub __gmpz_ui_sub +__GMP_DECLSPEC void mpz_ui_sub (mpz_ptr, unsigned long int, mpz_srcptr); + +#define mpz_submul __gmpz_submul +__GMP_DECLSPEC void mpz_submul (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_submul_ui __gmpz_submul_ui +__GMP_DECLSPEC void mpz_submul_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_swap __gmpz_swap +__GMP_DECLSPEC void mpz_swap (mpz_ptr, mpz_ptr) __GMP_NOTHROW; + +#define mpz_tdiv_ui __gmpz_tdiv_ui +__GMP_DECLSPEC unsigned long int mpz_tdiv_ui (mpz_srcptr, unsigned long int) __GMP_ATTRIBUTE_PURE; + +#define mpz_tdiv_q __gmpz_tdiv_q +__GMP_DECLSPEC void mpz_tdiv_q (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_tdiv_q_2exp __gmpz_tdiv_q_2exp +__GMP_DECLSPEC void mpz_tdiv_q_2exp (mpz_ptr, mpz_srcptr, mp_bitcnt_t); + +#define mpz_tdiv_q_ui __gmpz_tdiv_q_ui +__GMP_DECLSPEC unsigned long int mpz_tdiv_q_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_tdiv_qr __gmpz_tdiv_qr +__GMP_DECLSPEC void mpz_tdiv_qr (mpz_ptr, mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_tdiv_qr_ui __gmpz_tdiv_qr_ui +__GMP_DECLSPEC unsigned long int mpz_tdiv_qr_ui (mpz_ptr, mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_tdiv_r __gmpz_tdiv_r +__GMP_DECLSPEC void mpz_tdiv_r (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define 
mpz_tdiv_r_2exp __gmpz_tdiv_r_2exp +__GMP_DECLSPEC void mpz_tdiv_r_2exp (mpz_ptr, mpz_srcptr, mp_bitcnt_t); + +#define mpz_tdiv_r_ui __gmpz_tdiv_r_ui +__GMP_DECLSPEC unsigned long int mpz_tdiv_r_ui (mpz_ptr, mpz_srcptr, unsigned long int); + +#define mpz_tstbit __gmpz_tstbit +__GMP_DECLSPEC int mpz_tstbit (mpz_srcptr, mp_bitcnt_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpz_ui_pow_ui __gmpz_ui_pow_ui +__GMP_DECLSPEC void mpz_ui_pow_ui (mpz_ptr, unsigned long int, unsigned long int); + +#define mpz_urandomb __gmpz_urandomb +__GMP_DECLSPEC void mpz_urandomb (mpz_ptr, gmp_randstate_t, mp_bitcnt_t); + +#define mpz_urandomm __gmpz_urandomm +__GMP_DECLSPEC void mpz_urandomm (mpz_ptr, gmp_randstate_t, mpz_srcptr); + +#define mpz_xor __gmpz_xor +#define mpz_eor __gmpz_xor +__GMP_DECLSPEC void mpz_xor (mpz_ptr, mpz_srcptr, mpz_srcptr); + +#define mpz_limbs_read __gmpz_limbs_read +__GMP_DECLSPEC mp_srcptr mpz_limbs_read (mpz_srcptr); + +#define mpz_limbs_write __gmpz_limbs_write +__GMP_DECLSPEC mp_ptr mpz_limbs_write (mpz_ptr, mp_size_t); + +#define mpz_limbs_modify __gmpz_limbs_modify +__GMP_DECLSPEC mp_ptr mpz_limbs_modify (mpz_ptr, mp_size_t); + +#define mpz_limbs_finish __gmpz_limbs_finish +__GMP_DECLSPEC void mpz_limbs_finish (mpz_ptr, mp_size_t); + +#define mpz_roinit_n __gmpz_roinit_n +__GMP_DECLSPEC mpz_srcptr mpz_roinit_n (mpz_ptr, mp_srcptr, mp_size_t); + +#define MPZ_ROINIT_N(xp, xs) {{0, (xs),(xp) }} + +/**************** Rational (i.e. Q) routines. ****************/ + +#define mpq_abs __gmpq_abs +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpq_abs) +__GMP_DECLSPEC void mpq_abs (mpq_ptr, mpq_srcptr); +#endif + +#define mpq_add __gmpq_add +__GMP_DECLSPEC void mpq_add (mpq_ptr, mpq_srcptr, mpq_srcptr); + +#define mpq_canonicalize __gmpq_canonicalize +__GMP_DECLSPEC void mpq_canonicalize (mpq_ptr); + +#define mpq_clear __gmpq_clear +__GMP_DECLSPEC void mpq_clear (mpq_ptr); + +#define mpq_clears __gmpq_clears +__GMP_DECLSPEC void mpq_clears (mpq_ptr, ...); + +#define mpq_cmp __gmpq_cmp +__GMP_DECLSPEC int mpq_cmp (mpq_srcptr, mpq_srcptr) __GMP_ATTRIBUTE_PURE; + +#define _mpq_cmp_si __gmpq_cmp_si +__GMP_DECLSPEC int _mpq_cmp_si (mpq_srcptr, long, unsigned long) __GMP_ATTRIBUTE_PURE; + +#define _mpq_cmp_ui __gmpq_cmp_ui +__GMP_DECLSPEC int _mpq_cmp_ui (mpq_srcptr, unsigned long int, unsigned long int) __GMP_ATTRIBUTE_PURE; + +#define mpq_cmp_z __gmpq_cmp_z +__GMP_DECLSPEC int mpq_cmp_z (mpq_srcptr, mpz_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpq_div __gmpq_div +__GMP_DECLSPEC void mpq_div (mpq_ptr, mpq_srcptr, mpq_srcptr); + +#define mpq_div_2exp __gmpq_div_2exp +__GMP_DECLSPEC void mpq_div_2exp (mpq_ptr, mpq_srcptr, mp_bitcnt_t); + +#define mpq_equal __gmpq_equal +__GMP_DECLSPEC int mpq_equal (mpq_srcptr, mpq_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpq_get_num __gmpq_get_num +__GMP_DECLSPEC void mpq_get_num (mpz_ptr, mpq_srcptr); + +#define mpq_get_den __gmpq_get_den +__GMP_DECLSPEC void mpq_get_den (mpz_ptr, mpq_srcptr); + +#define mpq_get_d __gmpq_get_d +__GMP_DECLSPEC double mpq_get_d (mpq_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpq_get_str __gmpq_get_str +__GMP_DECLSPEC char *mpq_get_str (char *, int, mpq_srcptr); + +#define mpq_init __gmpq_init +__GMP_DECLSPEC void mpq_init (mpq_ptr); + +#define mpq_inits __gmpq_inits +__GMP_DECLSPEC void mpq_inits (mpq_ptr, ...); + +#define mpq_inp_str __gmpq_inp_str +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC size_t mpq_inp_str (mpq_ptr, FILE *, int); +#endif + +#define mpq_inv __gmpq_inv +__GMP_DECLSPEC void mpq_inv 
(mpq_ptr, mpq_srcptr); + +#define mpq_mul __gmpq_mul +__GMP_DECLSPEC void mpq_mul (mpq_ptr, mpq_srcptr, mpq_srcptr); + +#define mpq_mul_2exp __gmpq_mul_2exp +__GMP_DECLSPEC void mpq_mul_2exp (mpq_ptr, mpq_srcptr, mp_bitcnt_t); + +#define mpq_neg __gmpq_neg +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpq_neg) +__GMP_DECLSPEC void mpq_neg (mpq_ptr, mpq_srcptr); +#endif + +#define mpq_out_str __gmpq_out_str +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC size_t mpq_out_str (FILE *, int, mpq_srcptr); +#endif + +#define mpq_set __gmpq_set +__GMP_DECLSPEC void mpq_set (mpq_ptr, mpq_srcptr); + +#define mpq_set_d __gmpq_set_d +__GMP_DECLSPEC void mpq_set_d (mpq_ptr, double); + +#define mpq_set_den __gmpq_set_den +__GMP_DECLSPEC void mpq_set_den (mpq_ptr, mpz_srcptr); + +#define mpq_set_f __gmpq_set_f +__GMP_DECLSPEC void mpq_set_f (mpq_ptr, mpf_srcptr); + +#define mpq_set_num __gmpq_set_num +__GMP_DECLSPEC void mpq_set_num (mpq_ptr, mpz_srcptr); + +#define mpq_set_si __gmpq_set_si +__GMP_DECLSPEC void mpq_set_si (mpq_ptr, signed long int, unsigned long int); + +#define mpq_set_str __gmpq_set_str +__GMP_DECLSPEC int mpq_set_str (mpq_ptr, const char *, int); + +#define mpq_set_ui __gmpq_set_ui +__GMP_DECLSPEC void mpq_set_ui (mpq_ptr, unsigned long int, unsigned long int); + +#define mpq_set_z __gmpq_set_z +__GMP_DECLSPEC void mpq_set_z (mpq_ptr, mpz_srcptr); + +#define mpq_sub __gmpq_sub +__GMP_DECLSPEC void mpq_sub (mpq_ptr, mpq_srcptr, mpq_srcptr); + +#define mpq_swap __gmpq_swap +__GMP_DECLSPEC void mpq_swap (mpq_ptr, mpq_ptr) __GMP_NOTHROW; + + +/**************** Float (i.e. F) routines. ****************/ + +#define mpf_abs __gmpf_abs +__GMP_DECLSPEC void mpf_abs (mpf_ptr, mpf_srcptr); + +#define mpf_add __gmpf_add +__GMP_DECLSPEC void mpf_add (mpf_ptr, mpf_srcptr, mpf_srcptr); + +#define mpf_add_ui __gmpf_add_ui +__GMP_DECLSPEC void mpf_add_ui (mpf_ptr, mpf_srcptr, unsigned long int); +#define mpf_ceil __gmpf_ceil +__GMP_DECLSPEC void mpf_ceil (mpf_ptr, mpf_srcptr); + +#define mpf_clear __gmpf_clear +__GMP_DECLSPEC void mpf_clear (mpf_ptr); + +#define mpf_clears __gmpf_clears +__GMP_DECLSPEC void mpf_clears (mpf_ptr, ...); + +#define mpf_cmp __gmpf_cmp +__GMP_DECLSPEC int mpf_cmp (mpf_srcptr, mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_cmp_z __gmpf_cmp_z +__GMP_DECLSPEC int mpf_cmp_z (mpf_srcptr, mpz_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_cmp_d __gmpf_cmp_d +__GMP_DECLSPEC int mpf_cmp_d (mpf_srcptr, double) __GMP_ATTRIBUTE_PURE; + +#define mpf_cmp_si __gmpf_cmp_si +__GMP_DECLSPEC int mpf_cmp_si (mpf_srcptr, signed long int) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_cmp_ui __gmpf_cmp_ui +__GMP_DECLSPEC int mpf_cmp_ui (mpf_srcptr, unsigned long int) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_div __gmpf_div +__GMP_DECLSPEC void mpf_div (mpf_ptr, mpf_srcptr, mpf_srcptr); + +#define mpf_div_2exp __gmpf_div_2exp +__GMP_DECLSPEC void mpf_div_2exp (mpf_ptr, mpf_srcptr, mp_bitcnt_t); + +#define mpf_div_ui __gmpf_div_ui +__GMP_DECLSPEC void mpf_div_ui (mpf_ptr, mpf_srcptr, unsigned long int); + +#define mpf_dump __gmpf_dump +__GMP_DECLSPEC void mpf_dump (mpf_srcptr); + +#define mpf_eq __gmpf_eq +__GMP_DECLSPEC int mpf_eq (mpf_srcptr, mpf_srcptr, mp_bitcnt_t) __GMP_ATTRIBUTE_PURE; + +#define mpf_fits_sint_p __gmpf_fits_sint_p +__GMP_DECLSPEC int mpf_fits_sint_p (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_fits_slong_p __gmpf_fits_slong_p +__GMP_DECLSPEC int mpf_fits_slong_p (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + 
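+/* Illustrative sketch only, not part of the header: the mpf_fits_*
+   predicates report whether an mpf value, truncated to an integer, fits
+   the given C type. All calls below are declared in this header:
+
+     mpf_t f;
+     long l = 0;
+     mpf_init_set_d (f, 123.0);
+     if (mpf_fits_slong_p (f))
+       l = mpf_get_si (f);
+     mpf_clear (f);
+*/
+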
+#define mpf_fits_sshort_p __gmpf_fits_sshort_p +__GMP_DECLSPEC int mpf_fits_sshort_p (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_fits_uint_p __gmpf_fits_uint_p +__GMP_DECLSPEC int mpf_fits_uint_p (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_fits_ulong_p __gmpf_fits_ulong_p +__GMP_DECLSPEC int mpf_fits_ulong_p (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_fits_ushort_p __gmpf_fits_ushort_p +__GMP_DECLSPEC int mpf_fits_ushort_p (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_floor __gmpf_floor +__GMP_DECLSPEC void mpf_floor (mpf_ptr, mpf_srcptr); + +#define mpf_get_d __gmpf_get_d +__GMP_DECLSPEC double mpf_get_d (mpf_srcptr) __GMP_ATTRIBUTE_PURE; + +#define mpf_get_d_2exp __gmpf_get_d_2exp +__GMP_DECLSPEC double mpf_get_d_2exp (signed long int *, mpf_srcptr); + +#define mpf_get_default_prec __gmpf_get_default_prec +__GMP_DECLSPEC mp_bitcnt_t mpf_get_default_prec (void) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_get_prec __gmpf_get_prec +__GMP_DECLSPEC mp_bitcnt_t mpf_get_prec (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_get_si __gmpf_get_si +__GMP_DECLSPEC long mpf_get_si (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_get_str __gmpf_get_str +__GMP_DECLSPEC char *mpf_get_str (char *, mp_exp_t *, int, size_t, mpf_srcptr); + +#define mpf_get_ui __gmpf_get_ui +__GMP_DECLSPEC unsigned long mpf_get_ui (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_init __gmpf_init +__GMP_DECLSPEC void mpf_init (mpf_ptr); + +#define mpf_init2 __gmpf_init2 +__GMP_DECLSPEC void mpf_init2 (mpf_ptr, mp_bitcnt_t); + +#define mpf_inits __gmpf_inits +__GMP_DECLSPEC void mpf_inits (mpf_ptr, ...); + +#define mpf_init_set __gmpf_init_set +__GMP_DECLSPEC void mpf_init_set (mpf_ptr, mpf_srcptr); + +#define mpf_init_set_d __gmpf_init_set_d +__GMP_DECLSPEC void mpf_init_set_d (mpf_ptr, double); + +#define mpf_init_set_si __gmpf_init_set_si +__GMP_DECLSPEC void mpf_init_set_si (mpf_ptr, signed long int); + +#define mpf_init_set_str __gmpf_init_set_str +__GMP_DECLSPEC int mpf_init_set_str (mpf_ptr, const char *, int); + +#define mpf_init_set_ui __gmpf_init_set_ui +__GMP_DECLSPEC void mpf_init_set_ui (mpf_ptr, unsigned long int); + +#define mpf_inp_str __gmpf_inp_str +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC size_t mpf_inp_str (mpf_ptr, FILE *, int); +#endif + +#define mpf_integer_p __gmpf_integer_p +__GMP_DECLSPEC int mpf_integer_p (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_mul __gmpf_mul +__GMP_DECLSPEC void mpf_mul (mpf_ptr, mpf_srcptr, mpf_srcptr); + +#define mpf_mul_2exp __gmpf_mul_2exp +__GMP_DECLSPEC void mpf_mul_2exp (mpf_ptr, mpf_srcptr, mp_bitcnt_t); + +#define mpf_mul_ui __gmpf_mul_ui +__GMP_DECLSPEC void mpf_mul_ui (mpf_ptr, mpf_srcptr, unsigned long int); + +#define mpf_neg __gmpf_neg +__GMP_DECLSPEC void mpf_neg (mpf_ptr, mpf_srcptr); + +#define mpf_out_str __gmpf_out_str +#ifdef _GMP_H_HAVE_FILE +__GMP_DECLSPEC size_t mpf_out_str (FILE *, int, size_t, mpf_srcptr); +#endif + +#define mpf_pow_ui __gmpf_pow_ui +__GMP_DECLSPEC void mpf_pow_ui (mpf_ptr, mpf_srcptr, unsigned long int); + +#define mpf_random2 __gmpf_random2 +__GMP_DECLSPEC void mpf_random2 (mpf_ptr, mp_size_t, mp_exp_t); + +#define mpf_reldiff __gmpf_reldiff +__GMP_DECLSPEC void mpf_reldiff (mpf_ptr, mpf_srcptr, mpf_srcptr); + +#define mpf_set __gmpf_set +__GMP_DECLSPEC void mpf_set (mpf_ptr, mpf_srcptr); + +#define mpf_set_d __gmpf_set_d +__GMP_DECLSPEC void mpf_set_d (mpf_ptr, double); + +#define 
mpf_set_default_prec __gmpf_set_default_prec +__GMP_DECLSPEC void mpf_set_default_prec (mp_bitcnt_t) __GMP_NOTHROW; + +#define mpf_set_prec __gmpf_set_prec +__GMP_DECLSPEC void mpf_set_prec (mpf_ptr, mp_bitcnt_t); + +#define mpf_set_prec_raw __gmpf_set_prec_raw +__GMP_DECLSPEC void mpf_set_prec_raw (mpf_ptr, mp_bitcnt_t) __GMP_NOTHROW; + +#define mpf_set_q __gmpf_set_q +__GMP_DECLSPEC void mpf_set_q (mpf_ptr, mpq_srcptr); + +#define mpf_set_si __gmpf_set_si +__GMP_DECLSPEC void mpf_set_si (mpf_ptr, signed long int); + +#define mpf_set_str __gmpf_set_str +__GMP_DECLSPEC int mpf_set_str (mpf_ptr, const char *, int); + +#define mpf_set_ui __gmpf_set_ui +__GMP_DECLSPEC void mpf_set_ui (mpf_ptr, unsigned long int); + +#define mpf_set_z __gmpf_set_z +__GMP_DECLSPEC void mpf_set_z (mpf_ptr, mpz_srcptr); + +#define mpf_size __gmpf_size +__GMP_DECLSPEC size_t mpf_size (mpf_srcptr) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpf_sqrt __gmpf_sqrt +__GMP_DECLSPEC void mpf_sqrt (mpf_ptr, mpf_srcptr); + +#define mpf_sqrt_ui __gmpf_sqrt_ui +__GMP_DECLSPEC void mpf_sqrt_ui (mpf_ptr, unsigned long int); + +#define mpf_sub __gmpf_sub +__GMP_DECLSPEC void mpf_sub (mpf_ptr, mpf_srcptr, mpf_srcptr); + +#define mpf_sub_ui __gmpf_sub_ui +__GMP_DECLSPEC void mpf_sub_ui (mpf_ptr, mpf_srcptr, unsigned long int); + +#define mpf_swap __gmpf_swap +__GMP_DECLSPEC void mpf_swap (mpf_ptr, mpf_ptr) __GMP_NOTHROW; + +#define mpf_trunc __gmpf_trunc +__GMP_DECLSPEC void mpf_trunc (mpf_ptr, mpf_srcptr); + +#define mpf_ui_div __gmpf_ui_div +__GMP_DECLSPEC void mpf_ui_div (mpf_ptr, unsigned long int, mpf_srcptr); + +#define mpf_ui_sub __gmpf_ui_sub +__GMP_DECLSPEC void mpf_ui_sub (mpf_ptr, unsigned long int, mpf_srcptr); + +#define mpf_urandomb __gmpf_urandomb +__GMP_DECLSPEC void mpf_urandomb (mpf_t, gmp_randstate_t, mp_bitcnt_t); + + +/************ Low level positive-integer (i.e. N) routines. ************/ + +/* This is ugly, but we need to make user calls reach the prefixed function. 
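+   (For example, the mpn_add define just below routes a user call of
+   mpn_add to the prefixed library symbol __gmpn_add via the __MPN macro
+   defined earlier in this header.)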
*/ + +#define mpn_add __MPN(add) +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpn_add) +__GMP_DECLSPEC mp_limb_t mpn_add (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); +#endif + +#define mpn_add_1 __MPN(add_1) +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpn_add_1) +__GMP_DECLSPEC mp_limb_t mpn_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t) __GMP_NOTHROW; +#endif + +#define mpn_add_n __MPN(add_n) +__GMP_DECLSPEC mp_limb_t mpn_add_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); + +#define mpn_addmul_1 __MPN(addmul_1) +__GMP_DECLSPEC mp_limb_t mpn_addmul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +#define mpn_cmp __MPN(cmp) +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpn_cmp) +__GMP_DECLSPEC int mpn_cmp (mp_srcptr, mp_srcptr, mp_size_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; +#endif + +#define mpn_zero_p __MPN(zero_p) +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpn_zero_p) +__GMP_DECLSPEC int mpn_zero_p (mp_srcptr, mp_size_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; +#endif + +#define mpn_divexact_1 __MPN(divexact_1) +__GMP_DECLSPEC void mpn_divexact_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +#define mpn_divexact_by3(dst,src,size) \ + mpn_divexact_by3c (dst, src, size, __GMP_CAST (mp_limb_t, 0)) + +#define mpn_divexact_by3c __MPN(divexact_by3c) +__GMP_DECLSPEC mp_limb_t mpn_divexact_by3c (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +#define mpn_divmod_1(qp,np,nsize,dlimb) \ + mpn_divrem_1 (qp, __GMP_CAST (mp_size_t, 0), np, nsize, dlimb) + +#define mpn_divrem __MPN(divrem) +__GMP_DECLSPEC mp_limb_t mpn_divrem (mp_ptr, mp_size_t, mp_ptr, mp_size_t, mp_srcptr, mp_size_t); + +#define mpn_divrem_1 __MPN(divrem_1) +__GMP_DECLSPEC mp_limb_t mpn_divrem_1 (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_limb_t); + +#define mpn_divrem_2 __MPN(divrem_2) +__GMP_DECLSPEC mp_limb_t mpn_divrem_2 (mp_ptr, mp_size_t, mp_ptr, mp_size_t, mp_srcptr); + +#define mpn_div_qr_1 __MPN(div_qr_1) +__GMP_DECLSPEC mp_limb_t mpn_div_qr_1 (mp_ptr, mp_limb_t *, mp_srcptr, mp_size_t, mp_limb_t); + +#define mpn_div_qr_2 __MPN(div_qr_2) +__GMP_DECLSPEC mp_limb_t mpn_div_qr_2 (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_srcptr); + +#define mpn_gcd __MPN(gcd) +__GMP_DECLSPEC mp_size_t mpn_gcd (mp_ptr, mp_ptr, mp_size_t, mp_ptr, mp_size_t); + +#define mpn_gcd_1 __MPN(gcd_1) +__GMP_DECLSPEC mp_limb_t mpn_gcd_1 (mp_srcptr, mp_size_t, mp_limb_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_gcdext_1 __MPN(gcdext_1) +__GMP_DECLSPEC mp_limb_t mpn_gcdext_1 (mp_limb_signed_t *, mp_limb_signed_t *, mp_limb_t, mp_limb_t); + +#define mpn_gcdext __MPN(gcdext) +__GMP_DECLSPEC mp_size_t mpn_gcdext (mp_ptr, mp_ptr, mp_size_t *, mp_ptr, mp_size_t, mp_ptr, mp_size_t); + +#define mpn_get_str __MPN(get_str) +__GMP_DECLSPEC size_t mpn_get_str (unsigned char *, int, mp_ptr, mp_size_t); + +#define mpn_hamdist __MPN(hamdist) +__GMP_DECLSPEC mp_bitcnt_t mpn_hamdist (mp_srcptr, mp_srcptr, mp_size_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpn_lshift __MPN(lshift) +__GMP_DECLSPEC mp_limb_t mpn_lshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); + +#define mpn_mod_1 __MPN(mod_1) +__GMP_DECLSPEC mp_limb_t mpn_mod_1 (mp_srcptr, mp_size_t, mp_limb_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_mul __MPN(mul) +__GMP_DECLSPEC mp_limb_t mpn_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +#define mpn_mul_1 __MPN(mul_1) +__GMP_DECLSPEC mp_limb_t mpn_mul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +#define mpn_mul_n __MPN(mul_n) +__GMP_DECLSPEC void mpn_mul_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); + +#define mpn_sqr 
__MPN(sqr) +__GMP_DECLSPEC void mpn_sqr (mp_ptr, mp_srcptr, mp_size_t); + +#define mpn_neg __MPN(neg) +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpn_neg) +__GMP_DECLSPEC mp_limb_t mpn_neg (mp_ptr, mp_srcptr, mp_size_t); +#endif + +#define mpn_com __MPN(com) +__GMP_DECLSPEC void mpn_com (mp_ptr, mp_srcptr, mp_size_t); + +#define mpn_perfect_square_p __MPN(perfect_square_p) +__GMP_DECLSPEC int mpn_perfect_square_p (mp_srcptr, mp_size_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_perfect_power_p __MPN(perfect_power_p) +__GMP_DECLSPEC int mpn_perfect_power_p (mp_srcptr, mp_size_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_popcount __MPN(popcount) +__GMP_DECLSPEC mp_bitcnt_t mpn_popcount (mp_srcptr, mp_size_t) __GMP_NOTHROW __GMP_ATTRIBUTE_PURE; + +#define mpn_pow_1 __MPN(pow_1) +__GMP_DECLSPEC mp_size_t mpn_pow_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr); + +/* undocumented now, but retained here for upward compatibility */ +#define mpn_preinv_mod_1 __MPN(preinv_mod_1) +__GMP_DECLSPEC mp_limb_t mpn_preinv_mod_1 (mp_srcptr, mp_size_t, mp_limb_t, mp_limb_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_random __MPN(random) +__GMP_DECLSPEC void mpn_random (mp_ptr, mp_size_t); + +#define mpn_random2 __MPN(random2) +__GMP_DECLSPEC void mpn_random2 (mp_ptr, mp_size_t); + +#define mpn_rshift __MPN(rshift) +__GMP_DECLSPEC mp_limb_t mpn_rshift (mp_ptr, mp_srcptr, mp_size_t, unsigned int); + +#define mpn_scan0 __MPN(scan0) +__GMP_DECLSPEC mp_bitcnt_t mpn_scan0 (mp_srcptr, mp_bitcnt_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_scan1 __MPN(scan1) +__GMP_DECLSPEC mp_bitcnt_t mpn_scan1 (mp_srcptr, mp_bitcnt_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_set_str __MPN(set_str) +__GMP_DECLSPEC mp_size_t mpn_set_str (mp_ptr, const unsigned char *, size_t, int); + +#define mpn_sizeinbase __MPN(sizeinbase) +__GMP_DECLSPEC size_t mpn_sizeinbase (mp_srcptr, mp_size_t, int); + +#define mpn_sqrtrem __MPN(sqrtrem) +__GMP_DECLSPEC mp_size_t mpn_sqrtrem (mp_ptr, mp_ptr, mp_srcptr, mp_size_t); + +#define mpn_sub __MPN(sub) +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpn_sub) +__GMP_DECLSPEC mp_limb_t mpn_sub (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); +#endif + +#define mpn_sub_1 __MPN(sub_1) +#if __GMP_INLINE_PROTOTYPES || defined (__GMP_FORCE_mpn_sub_1) +__GMP_DECLSPEC mp_limb_t mpn_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t) __GMP_NOTHROW; +#endif + +#define mpn_sub_n __MPN(sub_n) +__GMP_DECLSPEC mp_limb_t mpn_sub_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); + +#define mpn_submul_1 __MPN(submul_1) +__GMP_DECLSPEC mp_limb_t mpn_submul_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t); + +#define mpn_tdiv_qr __MPN(tdiv_qr) +__GMP_DECLSPEC void mpn_tdiv_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t); + +#define mpn_and_n __MPN(and_n) +__GMP_DECLSPEC void mpn_and_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +#define mpn_andn_n __MPN(andn_n) +__GMP_DECLSPEC void mpn_andn_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +#define mpn_nand_n __MPN(nand_n) +__GMP_DECLSPEC void mpn_nand_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +#define mpn_ior_n __MPN(ior_n) +__GMP_DECLSPEC void mpn_ior_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +#define mpn_iorn_n __MPN(iorn_n) +__GMP_DECLSPEC void mpn_iorn_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +#define mpn_nior_n __MPN(nior_n) +__GMP_DECLSPEC void mpn_nior_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +#define mpn_xor_n __MPN(xor_n) +__GMP_DECLSPEC void mpn_xor_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +#define mpn_xnor_n __MPN(xnor_n) 
+__GMP_DECLSPEC void mpn_xnor_n (mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); + +#define mpn_copyi __MPN(copyi) +__GMP_DECLSPEC void mpn_copyi (mp_ptr, mp_srcptr, mp_size_t); +#define mpn_copyd __MPN(copyd) +__GMP_DECLSPEC void mpn_copyd (mp_ptr, mp_srcptr, mp_size_t); +#define mpn_zero __MPN(zero) +__GMP_DECLSPEC void mpn_zero (mp_ptr, mp_size_t); + +#define mpn_cnd_add_n __MPN(cnd_add_n) +__GMP_DECLSPEC mp_limb_t mpn_cnd_add_n (mp_limb_t, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); +#define mpn_cnd_sub_n __MPN(cnd_sub_n) +__GMP_DECLSPEC mp_limb_t mpn_cnd_sub_n (mp_limb_t, mp_ptr, mp_srcptr, mp_srcptr, mp_size_t); + +#define mpn_sec_add_1 __MPN(sec_add_1) +__GMP_DECLSPEC mp_limb_t mpn_sec_add_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr); +#define mpn_sec_add_1_itch __MPN(sec_add_1_itch) +__GMP_DECLSPEC mp_size_t mpn_sec_add_1_itch (mp_size_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_sec_sub_1 __MPN(sec_sub_1) +__GMP_DECLSPEC mp_limb_t mpn_sec_sub_1 (mp_ptr, mp_srcptr, mp_size_t, mp_limb_t, mp_ptr); +#define mpn_sec_sub_1_itch __MPN(sec_sub_1_itch) +__GMP_DECLSPEC mp_size_t mpn_sec_sub_1_itch (mp_size_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_cnd_swap __MPN(cnd_swap) +__GMP_DECLSPEC void mpn_cnd_swap (mp_limb_t, volatile mp_limb_t *, volatile mp_limb_t *, mp_size_t); + +#define mpn_sec_mul __MPN(sec_mul) +__GMP_DECLSPEC void mpn_sec_mul (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr); +#define mpn_sec_mul_itch __MPN(sec_mul_itch) +__GMP_DECLSPEC mp_size_t mpn_sec_mul_itch (mp_size_t, mp_size_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_sec_sqr __MPN(sec_sqr) +__GMP_DECLSPEC void mpn_sec_sqr (mp_ptr, mp_srcptr, mp_size_t, mp_ptr); +#define mpn_sec_sqr_itch __MPN(sec_sqr_itch) +__GMP_DECLSPEC mp_size_t mpn_sec_sqr_itch (mp_size_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_sec_powm __MPN(sec_powm) +__GMP_DECLSPEC void mpn_sec_powm (mp_ptr, mp_srcptr, mp_size_t, mp_srcptr, mp_bitcnt_t, mp_srcptr, mp_size_t, mp_ptr); +#define mpn_sec_powm_itch __MPN(sec_powm_itch) +__GMP_DECLSPEC mp_size_t mpn_sec_powm_itch (mp_size_t, mp_bitcnt_t, mp_size_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_sec_tabselect __MPN(sec_tabselect) +__GMP_DECLSPEC void mpn_sec_tabselect (volatile mp_limb_t *, volatile const mp_limb_t *, mp_size_t, mp_size_t, mp_size_t); + +#define mpn_sec_div_qr __MPN(sec_div_qr) +__GMP_DECLSPEC mp_limb_t mpn_sec_div_qr (mp_ptr, mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr); +#define mpn_sec_div_qr_itch __MPN(sec_div_qr_itch) +__GMP_DECLSPEC mp_size_t mpn_sec_div_qr_itch (mp_size_t, mp_size_t) __GMP_ATTRIBUTE_PURE; +#define mpn_sec_div_r __MPN(sec_div_r) +__GMP_DECLSPEC void mpn_sec_div_r (mp_ptr, mp_size_t, mp_srcptr, mp_size_t, mp_ptr); +#define mpn_sec_div_r_itch __MPN(sec_div_r_itch) +__GMP_DECLSPEC mp_size_t mpn_sec_div_r_itch (mp_size_t, mp_size_t) __GMP_ATTRIBUTE_PURE; + +#define mpn_sec_invert __MPN(sec_invert) +__GMP_DECLSPEC int mpn_sec_invert (mp_ptr, mp_ptr, mp_srcptr, mp_size_t, mp_bitcnt_t, mp_ptr); +#define mpn_sec_invert_itch __MPN(sec_invert_itch) +__GMP_DECLSPEC mp_size_t mpn_sec_invert_itch (mp_size_t) __GMP_ATTRIBUTE_PURE; + + +/**************** mpz inlines ****************/ + +/* The following are provided as inlines where possible, but always exist as + library functions too, for binary compatibility. + + Within gmp itself this inlining generally isn't relied on, since it + doesn't get done for all compilers, whereas if something is worth + inlining then it's worth arranging always. + + There are two styles of inlining here. 
When the same bit of code is + wanted for the inline as for the library version, then __GMP_FORCE_foo + arranges for that code to be emitted and the __GMP_EXTERN_INLINE + directive suppressed, eg. mpz_fits_uint_p. When a different bit of code + is wanted for the inline than for the library version, then + __GMP_FORCE_foo arranges the inline to be suppressed, eg. mpz_abs. */ + +#if defined (__GMP_EXTERN_INLINE) && ! defined (__GMP_FORCE_mpz_abs) +__GMP_EXTERN_INLINE void +mpz_abs (mpz_ptr __gmp_w, mpz_srcptr __gmp_u) +{ + if (__gmp_w != __gmp_u) + mpz_set (__gmp_w, __gmp_u); + __gmp_w->_mp_size = __GMP_ABS (__gmp_w->_mp_size); +} +#endif + +#if GMP_NAIL_BITS == 0 +#define __GMPZ_FITS_UTYPE_P(z,maxval) \ + mp_size_t __gmp_n = z->_mp_size; \ + mp_ptr __gmp_p = z->_mp_d; \ + return (__gmp_n == 0 || (__gmp_n == 1 && __gmp_p[0] <= maxval)); +#else +#define __GMPZ_FITS_UTYPE_P(z,maxval) \ + mp_size_t __gmp_n = z->_mp_size; \ + mp_ptr __gmp_p = z->_mp_d; \ + return (__gmp_n == 0 || (__gmp_n == 1 && __gmp_p[0] <= maxval) \ + || (__gmp_n == 2 && __gmp_p[1] <= ((mp_limb_t) maxval >> GMP_NUMB_BITS))); +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpz_fits_uint_p) +#if ! defined (__GMP_FORCE_mpz_fits_uint_p) +__GMP_EXTERN_INLINE +#endif +int +mpz_fits_uint_p (mpz_srcptr __gmp_z) __GMP_NOTHROW +{ + __GMPZ_FITS_UTYPE_P (__gmp_z, UINT_MAX); +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpz_fits_ulong_p) +#if ! defined (__GMP_FORCE_mpz_fits_ulong_p) +__GMP_EXTERN_INLINE +#endif +int +mpz_fits_ulong_p (mpz_srcptr __gmp_z) __GMP_NOTHROW +{ + __GMPZ_FITS_UTYPE_P (__gmp_z, ULONG_MAX); +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpz_fits_ushort_p) +#if ! defined (__GMP_FORCE_mpz_fits_ushort_p) +__GMP_EXTERN_INLINE +#endif +int +mpz_fits_ushort_p (mpz_srcptr __gmp_z) __GMP_NOTHROW +{ + __GMPZ_FITS_UTYPE_P (__gmp_z, USHRT_MAX); +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpz_get_ui) +#if ! defined (__GMP_FORCE_mpz_get_ui) +__GMP_EXTERN_INLINE +#endif +unsigned long +mpz_get_ui (mpz_srcptr __gmp_z) __GMP_NOTHROW +{ + mp_ptr __gmp_p = __gmp_z->_mp_d; + mp_size_t __gmp_n = __gmp_z->_mp_size; + mp_limb_t __gmp_l = __gmp_p[0]; + /* This is a "#if" rather than a plain "if" so as to avoid gcc warnings + about "<< GMP_NUMB_BITS" exceeding the type size, and to avoid Borland + C++ 6.0 warnings about condition always true for something like + "ULONG_MAX < GMP_NUMB_MASK". */ +#if GMP_NAIL_BITS == 0 || defined (_LONG_LONG_LIMB) + /* limb==long and no nails, or limb==longlong, one limb is enough */ + return (__gmp_n != 0 ? __gmp_l : 0); +#else + /* limb==long and nails, need two limbs when available */ + __gmp_n = __GMP_ABS (__gmp_n); + if (__gmp_n <= 1) + return (__gmp_n != 0 ? __gmp_l : 0); + else + return __gmp_l + (__gmp_p[1] << GMP_NUMB_BITS); +#endif +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpz_getlimbn) +#if ! defined (__GMP_FORCE_mpz_getlimbn) +__GMP_EXTERN_INLINE +#endif +mp_limb_t +mpz_getlimbn (mpz_srcptr __gmp_z, mp_size_t __gmp_n) __GMP_NOTHROW +{ + mp_limb_t __gmp_result = 0; + if (__GMP_LIKELY (__gmp_n >= 0 && __gmp_n < __GMP_ABS (__gmp_z->_mp_size))) + __gmp_result = __gmp_z->_mp_d[__gmp_n]; + return __gmp_result; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) && ! 
defined (__GMP_FORCE_mpz_neg) +__GMP_EXTERN_INLINE void +mpz_neg (mpz_ptr __gmp_w, mpz_srcptr __gmp_u) +{ + if (__gmp_w != __gmp_u) + mpz_set (__gmp_w, __gmp_u); + __gmp_w->_mp_size = - __gmp_w->_mp_size; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpz_perfect_square_p) +#if ! defined (__GMP_FORCE_mpz_perfect_square_p) +__GMP_EXTERN_INLINE +#endif +int +mpz_perfect_square_p (mpz_srcptr __gmp_a) +{ + mp_size_t __gmp_asize; + int __gmp_result; + + __gmp_asize = __gmp_a->_mp_size; + __gmp_result = (__gmp_asize >= 0); /* zero is a square, negatives are not */ + if (__GMP_LIKELY (__gmp_asize > 0)) + __gmp_result = mpn_perfect_square_p (__gmp_a->_mp_d, __gmp_asize); + return __gmp_result; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpz_popcount) +#if ! defined (__GMP_FORCE_mpz_popcount) +__GMP_EXTERN_INLINE +#endif +mp_bitcnt_t +mpz_popcount (mpz_srcptr __gmp_u) __GMP_NOTHROW +{ + mp_size_t __gmp_usize; + mp_bitcnt_t __gmp_result; + + __gmp_usize = __gmp_u->_mp_size; + __gmp_result = (__gmp_usize < 0 ? ULONG_MAX : 0); + if (__GMP_LIKELY (__gmp_usize > 0)) + __gmp_result = mpn_popcount (__gmp_u->_mp_d, __gmp_usize); + return __gmp_result; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpz_set_q) +#if ! defined (__GMP_FORCE_mpz_set_q) +__GMP_EXTERN_INLINE +#endif +void +mpz_set_q (mpz_ptr __gmp_w, mpq_srcptr __gmp_u) +{ + mpz_tdiv_q (__gmp_w, mpq_numref (__gmp_u), mpq_denref (__gmp_u)); +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpz_size) +#if ! defined (__GMP_FORCE_mpz_size) +__GMP_EXTERN_INLINE +#endif +size_t +mpz_size (mpz_srcptr __gmp_z) __GMP_NOTHROW +{ + return __GMP_ABS (__gmp_z->_mp_size); +} +#endif + + +/**************** mpq inlines ****************/ + +#if defined (__GMP_EXTERN_INLINE) && ! defined (__GMP_FORCE_mpq_abs) +__GMP_EXTERN_INLINE void +mpq_abs (mpq_ptr __gmp_w, mpq_srcptr __gmp_u) +{ + if (__gmp_w != __gmp_u) + mpq_set (__gmp_w, __gmp_u); + __gmp_w->_mp_num._mp_size = __GMP_ABS (__gmp_w->_mp_num._mp_size); +} +#endif + +#if defined (__GMP_EXTERN_INLINE) && ! defined (__GMP_FORCE_mpq_neg) +__GMP_EXTERN_INLINE void +mpq_neg (mpq_ptr __gmp_w, mpq_srcptr __gmp_u) +{ + if (__gmp_w != __gmp_u) + mpq_set (__gmp_w, __gmp_u); + __gmp_w->_mp_num._mp_size = - __gmp_w->_mp_num._mp_size; +} +#endif + + +/**************** mpn inlines ****************/ + +/* The comments with __GMPN_ADD_1 below apply here too. + + The test for FUNCTION returning 0 should predict well. If it's assumed + {yp,ysize} will usually have a random number of bits then the high limb + won't be full and a carry out will occur a good deal less than 50% of the + time. + + ysize==0 isn't a documented feature, but is used internally in a few + places. + + Producing cout last stops it using up a register during the main part of + the calculation, though gcc (as of 3.0) on an "if (mpn_add (...))" + doesn't seem able to move the true and false legs of the conditional up + to the two places cout is generated. 
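+
+   As a usage sketch, and precisely what the mpn_add inline further below
+   does (xsize must be >= ysize):
+
+     mp_limb_t cout;
+     __GMPN_ADD (cout, wp, xp, xsize, yp, ysize);
+
+   after which {wp,xsize} holds the sum and cout the carry out of the top
+   limb.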
*/ + +#define __GMPN_AORS(cout, wp, xp, xsize, yp, ysize, FUNCTION, TEST) \ + do { \ + mp_size_t __gmp_i; \ + mp_limb_t __gmp_x; \ + \ + /* ASSERT ((ysize) >= 0); */ \ + /* ASSERT ((xsize) >= (ysize)); */ \ + /* ASSERT (MPN_SAME_OR_SEPARATE2_P (wp, xsize, xp, xsize)); */ \ + /* ASSERT (MPN_SAME_OR_SEPARATE2_P (wp, xsize, yp, ysize)); */ \ + \ + __gmp_i = (ysize); \ + if (__gmp_i != 0) \ + { \ + if (FUNCTION (wp, xp, yp, __gmp_i)) \ + { \ + do \ + { \ + if (__gmp_i >= (xsize)) \ + { \ + (cout) = 1; \ + goto __gmp_done; \ + } \ + __gmp_x = (xp)[__gmp_i]; \ + } \ + while (TEST); \ + } \ + } \ + if ((wp) != (xp)) \ + __GMPN_COPY_REST (wp, xp, xsize, __gmp_i); \ + (cout) = 0; \ + __gmp_done: \ + ; \ + } while (0) + +#define __GMPN_ADD(cout, wp, xp, xsize, yp, ysize) \ + __GMPN_AORS (cout, wp, xp, xsize, yp, ysize, mpn_add_n, \ + (((wp)[__gmp_i++] = (__gmp_x + 1) & GMP_NUMB_MASK) == 0)) +#define __GMPN_SUB(cout, wp, xp, xsize, yp, ysize) \ + __GMPN_AORS (cout, wp, xp, xsize, yp, ysize, mpn_sub_n, \ + (((wp)[__gmp_i++] = (__gmp_x - 1) & GMP_NUMB_MASK), __gmp_x == 0)) + + +/* The use of __gmp_i indexing is designed to ensure a compile time src==dst + remains nice and clear to the compiler, so that __GMPN_COPY_REST can + disappear, and the load/add/store gets a chance to become a + read-modify-write on CISC CPUs. + + Alternatives: + + Using a pair of pointers instead of indexing would be possible, but gcc + isn't able to recognise compile-time src==dst in that case, even when the + pointers are incremented more or less together. Other compilers would + very likely have similar difficulty. + + gcc could use "if (__builtin_constant_p(src==dst) && src==dst)" or + similar to detect a compile-time src==dst. This works nicely on gcc + 2.95.x, it's not good on gcc 3.0 where __builtin_constant_p(p==p) seems + to be always false, for a pointer p. But the current code form seems + good enough for src==dst anyway. + + gcc on x86 as usual doesn't give particularly good flags handling for the + carry/borrow detection. It's tempting to want some multi instruction asm + blocks to help it, and this was tried, but in truth there's only a few + instructions to save and any gain is all too easily lost by register + juggling setting up for the asm. 
*/ + +#if GMP_NAIL_BITS == 0 +#define __GMPN_AORS_1(cout, dst, src, n, v, OP, CB) \ + do { \ + mp_size_t __gmp_i; \ + mp_limb_t __gmp_x, __gmp_r; \ + \ + /* ASSERT ((n) >= 1); */ \ + /* ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, n)); */ \ + \ + __gmp_x = (src)[0]; \ + __gmp_r = __gmp_x OP (v); \ + (dst)[0] = __gmp_r; \ + if (CB (__gmp_r, __gmp_x, (v))) \ + { \ + (cout) = 1; \ + for (__gmp_i = 1; __gmp_i < (n);) \ + { \ + __gmp_x = (src)[__gmp_i]; \ + __gmp_r = __gmp_x OP 1; \ + (dst)[__gmp_i] = __gmp_r; \ + ++__gmp_i; \ + if (!CB (__gmp_r, __gmp_x, 1)) \ + { \ + if ((src) != (dst)) \ + __GMPN_COPY_REST (dst, src, n, __gmp_i); \ + (cout) = 0; \ + break; \ + } \ + } \ + } \ + else \ + { \ + if ((src) != (dst)) \ + __GMPN_COPY_REST (dst, src, n, 1); \ + (cout) = 0; \ + } \ + } while (0) +#endif + +#if GMP_NAIL_BITS >= 1 +#define __GMPN_AORS_1(cout, dst, src, n, v, OP, CB) \ + do { \ + mp_size_t __gmp_i; \ + mp_limb_t __gmp_x, __gmp_r; \ + \ + /* ASSERT ((n) >= 1); */ \ + /* ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, n)); */ \ + \ + __gmp_x = (src)[0]; \ + __gmp_r = __gmp_x OP (v); \ + (dst)[0] = __gmp_r & GMP_NUMB_MASK; \ + if (__gmp_r >> GMP_NUMB_BITS != 0) \ + { \ + (cout) = 1; \ + for (__gmp_i = 1; __gmp_i < (n);) \ + { \ + __gmp_x = (src)[__gmp_i]; \ + __gmp_r = __gmp_x OP 1; \ + (dst)[__gmp_i] = __gmp_r & GMP_NUMB_MASK; \ + ++__gmp_i; \ + if (__gmp_r >> GMP_NUMB_BITS == 0) \ + { \ + if ((src) != (dst)) \ + __GMPN_COPY_REST (dst, src, n, __gmp_i); \ + (cout) = 0; \ + break; \ + } \ + } \ + } \ + else \ + { \ + if ((src) != (dst)) \ + __GMPN_COPY_REST (dst, src, n, 1); \ + (cout) = 0; \ + } \ + } while (0) +#endif + +#define __GMPN_ADDCB(r,x,y) ((r) < (y)) +#define __GMPN_SUBCB(r,x,y) ((x) < (y)) + +#define __GMPN_ADD_1(cout, dst, src, n, v) \ + __GMPN_AORS_1(cout, dst, src, n, v, +, __GMPN_ADDCB) +#define __GMPN_SUB_1(cout, dst, src, n, v) \ + __GMPN_AORS_1(cout, dst, src, n, v, -, __GMPN_SUBCB) + + +/* Compare {xp,size} and {yp,size}, setting "result" to positive, zero or + negative. size==0 is allowed. On random data usually only one limb will + need to be examined to get a result, so it's worth having it inline. */ +#define __GMPN_CMP(result, xp, yp, size) \ + do { \ + mp_size_t __gmp_i; \ + mp_limb_t __gmp_x, __gmp_y; \ + \ + /* ASSERT ((size) >= 0); */ \ + \ + (result) = 0; \ + __gmp_i = (size); \ + while (--__gmp_i >= 0) \ + { \ + __gmp_x = (xp)[__gmp_i]; \ + __gmp_y = (yp)[__gmp_i]; \ + if (__gmp_x != __gmp_y) \ + { \ + /* Cannot use __gmp_x - __gmp_y, may overflow an "int" */ \ + (result) = (__gmp_x > __gmp_y ? 1 : -1); \ + break; \ + } \ + } \ + } while (0) + + +#if defined (__GMPN_COPY) && ! defined (__GMPN_COPY_REST) +#define __GMPN_COPY_REST(dst, src, size, start) \ + do { \ + /* ASSERT ((start) >= 0); */ \ + /* ASSERT ((start) <= (size)); */ \ + __GMPN_COPY ((dst)+(start), (src)+(start), (size)-(start)); \ + } while (0) +#endif + +/* Copy {src,size} to {dst,size}, starting at "start". This is designed to + keep the indexing dst[j] and src[j] nice and simple for __GMPN_ADD_1, + __GMPN_ADD, etc. */ +#if ! 
defined (__GMPN_COPY_REST) +#define __GMPN_COPY_REST(dst, src, size, start) \ + do { \ + mp_size_t __gmp_j; \ + /* ASSERT ((size) >= 0); */ \ + /* ASSERT ((start) >= 0); */ \ + /* ASSERT ((start) <= (size)); */ \ + /* ASSERT (MPN_SAME_OR_SEPARATE_P (dst, src, size)); */ \ + __GMP_CRAY_Pragma ("_CRI ivdep"); \ + for (__gmp_j = (start); __gmp_j < (size); __gmp_j++) \ + (dst)[__gmp_j] = (src)[__gmp_j]; \ + } while (0) +#endif + +/* Enhancement: Use some of the smarter code from gmp-impl.h. Maybe use + mpn_copyi if there's a native version, and if we don't mind demanding + binary compatibility for it (on targets which use it). */ + +#if ! defined (__GMPN_COPY) +#define __GMPN_COPY(dst, src, size) __GMPN_COPY_REST (dst, src, size, 0) +#endif + + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpn_add) +#if ! defined (__GMP_FORCE_mpn_add) +__GMP_EXTERN_INLINE +#endif +mp_limb_t +mpn_add (mp_ptr __gmp_wp, mp_srcptr __gmp_xp, mp_size_t __gmp_xsize, mp_srcptr __gmp_yp, mp_size_t __gmp_ysize) +{ + mp_limb_t __gmp_c; + __GMPN_ADD (__gmp_c, __gmp_wp, __gmp_xp, __gmp_xsize, __gmp_yp, __gmp_ysize); + return __gmp_c; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpn_add_1) +#if ! defined (__GMP_FORCE_mpn_add_1) +__GMP_EXTERN_INLINE +#endif +mp_limb_t +mpn_add_1 (mp_ptr __gmp_dst, mp_srcptr __gmp_src, mp_size_t __gmp_size, mp_limb_t __gmp_n) __GMP_NOTHROW +{ + mp_limb_t __gmp_c; + __GMPN_ADD_1 (__gmp_c, __gmp_dst, __gmp_src, __gmp_size, __gmp_n); + return __gmp_c; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpn_cmp) +#if ! defined (__GMP_FORCE_mpn_cmp) +__GMP_EXTERN_INLINE +#endif +int +mpn_cmp (mp_srcptr __gmp_xp, mp_srcptr __gmp_yp, mp_size_t __gmp_size) __GMP_NOTHROW +{ + int __gmp_result; + __GMPN_CMP (__gmp_result, __gmp_xp, __gmp_yp, __gmp_size); + return __gmp_result; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpn_zero_p) +#if ! defined (__GMP_FORCE_mpn_zero_p) +__GMP_EXTERN_INLINE +#endif +int +mpn_zero_p (mp_srcptr __gmp_p, mp_size_t __gmp_n) __GMP_NOTHROW +{ + /* if (__GMP_LIKELY (__gmp_n > 0)) */ + do { + if (__gmp_p[--__gmp_n] != 0) + return 0; + } while (__gmp_n != 0); + return 1; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpn_sub) +#if ! defined (__GMP_FORCE_mpn_sub) +__GMP_EXTERN_INLINE +#endif +mp_limb_t +mpn_sub (mp_ptr __gmp_wp, mp_srcptr __gmp_xp, mp_size_t __gmp_xsize, mp_srcptr __gmp_yp, mp_size_t __gmp_ysize) +{ + mp_limb_t __gmp_c; + __GMPN_SUB (__gmp_c, __gmp_wp, __gmp_xp, __gmp_xsize, __gmp_yp, __gmp_ysize); + return __gmp_c; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpn_sub_1) +#if ! defined (__GMP_FORCE_mpn_sub_1) +__GMP_EXTERN_INLINE +#endif +mp_limb_t +mpn_sub_1 (mp_ptr __gmp_dst, mp_srcptr __gmp_src, mp_size_t __gmp_size, mp_limb_t __gmp_n) __GMP_NOTHROW +{ + mp_limb_t __gmp_c; + __GMPN_SUB_1 (__gmp_c, __gmp_dst, __gmp_src, __gmp_size, __gmp_n); + return __gmp_c; +} +#endif + +#if defined (__GMP_EXTERN_INLINE) || defined (__GMP_FORCE_mpn_neg) +#if ! defined (__GMP_FORCE_mpn_neg) +__GMP_EXTERN_INLINE +#endif +mp_limb_t +mpn_neg (mp_ptr __gmp_rp, mp_srcptr __gmp_up, mp_size_t __gmp_n) +{ + while (*__gmp_up == 0) /* Low zero limbs are unchanged by negation. */ + { + *__gmp_rp = 0; + if (!--__gmp_n) /* All zero */ + return 0; + ++__gmp_up; ++__gmp_rp; + } + + *__gmp_rp = (- *__gmp_up) & GMP_NUMB_MASK; + + if (--__gmp_n) /* Higher limbs get complemented. 
*/ + mpn_com (++__gmp_rp, ++__gmp_up, __gmp_n); + + return 1; +} +#endif + +#if defined (__cplusplus) +} +#endif + + +/* Allow faster testing for negative, zero, and positive. */ +#define mpz_sgn(Z) ((Z)->_mp_size < 0 ? -1 : (Z)->_mp_size > 0) +#define mpf_sgn(F) ((F)->_mp_size < 0 ? -1 : (F)->_mp_size > 0) +#define mpq_sgn(Q) ((Q)->_mp_num._mp_size < 0 ? -1 : (Q)->_mp_num._mp_size > 0) + +/* When using GCC, optimize certain common comparisons. */ +#if defined (__GNUC__) && __GNUC__ >= 2 +#define mpz_cmp_ui(Z,UI) \ + (__builtin_constant_p (UI) && (UI) == 0 \ + ? mpz_sgn (Z) : _mpz_cmp_ui (Z,UI)) +#define mpz_cmp_si(Z,SI) \ + (__builtin_constant_p ((SI) >= 0) && (SI) >= 0 \ + ? mpz_cmp_ui (Z, __GMP_CAST (unsigned long, SI)) \ + : _mpz_cmp_si (Z,SI)) +#define mpq_cmp_ui(Q,NUI,DUI) \ + (__builtin_constant_p (NUI) && (NUI) == 0 ? mpq_sgn (Q) \ + : __builtin_constant_p ((NUI) == (DUI)) && (NUI) == (DUI) \ + ? mpz_cmp (mpq_numref (Q), mpq_denref (Q)) \ + : _mpq_cmp_ui (Q,NUI,DUI)) +#define mpq_cmp_si(q,n,d) \ + (__builtin_constant_p ((n) >= 0) && (n) >= 0 \ + ? mpq_cmp_ui (q, __GMP_CAST (unsigned long, n), d) \ + : _mpq_cmp_si (q, n, d)) +#else +#define mpz_cmp_ui(Z,UI) _mpz_cmp_ui (Z,UI) +#define mpz_cmp_si(Z,UI) _mpz_cmp_si (Z,UI) +#define mpq_cmp_ui(Q,NUI,DUI) _mpq_cmp_ui (Q,NUI,DUI) +#define mpq_cmp_si(q,n,d) _mpq_cmp_si(q,n,d) +#endif + + +/* Using "&" rather than "&&" means these can come out branch-free. Every + mpz_t has at least one limb allocated, so fetching the low limb is always + allowed. */ +#define mpz_odd_p(z) (((z)->_mp_size != 0) & __GMP_CAST (int, (z)->_mp_d[0])) +#define mpz_even_p(z) (! mpz_odd_p (z)) + + +/**************** C++ routines ****************/ + +#ifdef __cplusplus +__GMP_DECLSPEC_XX std::ostream& operator<< (std::ostream &, mpz_srcptr); +__GMP_DECLSPEC_XX std::ostream& operator<< (std::ostream &, mpq_srcptr); +__GMP_DECLSPEC_XX std::ostream& operator<< (std::ostream &, mpf_srcptr); +__GMP_DECLSPEC_XX std::istream& operator>> (std::istream &, mpz_ptr); +__GMP_DECLSPEC_XX std::istream& operator>> (std::istream &, mpq_ptr); +__GMP_DECLSPEC_XX std::istream& operator>> (std::istream &, mpf_ptr); +#endif + + +/* Source-level compatibility with GMP 2 and earlier. */ +#define mpn_divmod(qp,np,nsize,dp,dsize) \ + mpn_divrem (qp, __GMP_CAST (mp_size_t, 0), np, nsize, dp, dsize) + +/* Source-level compatibility with GMP 1. */ +#define mpz_mdiv mpz_fdiv_q +#define mpz_mdivmod mpz_fdiv_qr +#define mpz_mmod mpz_fdiv_r +#define mpz_mdiv_ui mpz_fdiv_q_ui +#define mpz_mdivmod_ui(q,r,n,d) \ + (((r) == 0) ? mpz_fdiv_q_ui (q,n,d) : mpz_fdiv_qr_ui (q,r,n,d)) +#define mpz_mmod_ui(r,n,d) \ + (((r) == 0) ? mpz_fdiv_ui (n,d) : mpz_fdiv_r_ui (r,n,d)) + +/* Useful synonyms, but not quite compatible with GMP 1. */ +#define mpz_div mpz_fdiv_q +#define mpz_divmod mpz_fdiv_qr +#define mpz_div_ui mpz_fdiv_q_ui +#define mpz_divmod_ui mpz_fdiv_qr_ui +#define mpz_div_2exp mpz_fdiv_q_2exp +#define mpz_mod_2exp mpz_fdiv_r_2exp + +enum +{ + GMP_ERROR_NONE = 0, + GMP_ERROR_UNSUPPORTED_ARGUMENT = 1, + GMP_ERROR_DIVISION_BY_ZERO = 2, + GMP_ERROR_SQRT_OF_NEGATIVE = 4, + GMP_ERROR_INVALID_ARGUMENT = 8 +}; + +/* Define CC and CFLAGS which were used to build this version of GMP */ +#define __GMP_CC "gcc" +#define __GMP_CFLAGS "-m32 -O2 -pedantic -fomit-frame-pointer -mtune=pentiumpro -march=pentiumpro" + +/* Major version number is the value of __GNU_MP__ too, above. 
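+   For this build (6.1.2) the __GNU_MP_RELEASE composite below works out
+   to 6*10000 + 1*100 + 2 = 60102, which is convenient for single-integer
+   version checks.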
*/ +#define __GNU_MP_VERSION 6 +#define __GNU_MP_VERSION_MINOR 1 +#define __GNU_MP_VERSION_PATCHLEVEL 2 +#define __GNU_MP_RELEASE (__GNU_MP_VERSION * 10000 + __GNU_MP_VERSION_MINOR * 100 + __GNU_MP_VERSION_PATCHLEVEL) + +#define __GMP_H__ +#endif /* __GMP_H__ */ diff --git a/depend/secp256k1/include/secp256k1.h b/depend/secp256k1/include/secp256k1.h index f1f78ab7d..3e90b1bc7 100644 --- a/depend/secp256k1/include/secp256k1.h +++ b/depend/secp256k1/include/secp256k1.h @@ -33,9 +33,10 @@ extern "C" { * verification). * * A constructed context can safely be used from multiple threads - * simultaneously, but API call that take a non-const pointer to a context + * simultaneously, but API calls that take a non-const pointer to a context * need exclusive access to it. In particular this is the case for - * secp256k1_context_destroy and secp256k1_context_randomize. + * secp256k1_context_destroy, secp256k1_context_preallocated_destroy, + * and secp256k1_context_randomize. * * Regarding randomization, either do it once at creation time (in which case * you do not need any locking for the other calls), or use a read-write lock. @@ -163,7 +164,8 @@ typedef int (*secp256k1_nonce_function)( #define SECP256K1_FLAGS_BIT_CONTEXT_SIGN (1 << 9) #define SECP256K1_FLAGS_BIT_COMPRESSION (1 << 8) -/** Flags to pass to secp256k1_context_create. */ +/** Flags to pass to secp256k1_context_create, secp256k1_context_preallocated_size, and + * secp256k1_context_preallocated_create. */ #define SECP256K1_CONTEXT_VERIFY (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) #define SECP256K1_CONTEXT_SIGN (SECP256K1_FLAGS_TYPE_CONTEXT | SECP256K1_FLAGS_BIT_CONTEXT_SIGN) #define SECP256K1_CONTEXT_NONE (SECP256K1_FLAGS_TYPE_CONTEXT) @@ -186,7 +188,11 @@ typedef int (*secp256k1_nonce_function)( */ SECP256K1_API extern const secp256k1_context *secp256k1_context_no_precomp; -/** Create a secp256k1 context object. +/** Create a secp256k1 context object (in dynamically allocated memory). + * + * This function uses malloc to allocate memory. It is guaranteed that malloc is + * called at most once for every call of this function. If you need to avoid dynamic + * memory allocation entirely, see the functions in secp256k1_preallocated.h. * * Returns: a newly created context object. * In: flags: which parts of the context to initialize. @@ -197,7 +203,11 @@ SECP256K1_API secp256k1_context* secp256k1_context_create( unsigned int flags ) SECP256K1_WARN_UNUSED_RESULT; -/** Copies a secp256k1 context object. +/** Copy a secp256k1 context object (into dynamically allocated memory). + * + * This function uses malloc to allocate memory. It is guaranteed that malloc is + * called at most once for every call of this function. If you need to avoid dynamic + * memory allocation entirely, see the functions in secp256k1_preallocated.h. * * Returns: a newly created context object. * Args: ctx: an existing context to copy (cannot be NULL) @@ -206,10 +216,18 @@ SECP256K1_API secp256k1_context* secp256k1_context_clone( const secp256k1_context* ctx ) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; -/** Destroy a secp256k1 context object. +/** Destroy a secp256k1 context object (created in dynamically allocated memory). * * The context pointer may not be used afterwards. - * Args: ctx: an existing context to destroy (cannot be NULL) + * + * The context to destroy must have been created using secp256k1_context_create + * or secp256k1_context_clone. 
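+ * As a minimal sketch of the matching lifecycle (the flag is chosen purely
+ * for illustration):
+ *
+ *     secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ *     ...use ctx with API functions...
+ *     secp256k1_context_destroy(ctx);
+ *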
If the context has instead been created using + * secp256k1_context_preallocated_create or secp256k1_context_preallocated_clone, the + * behaviour is undefined. In that case, secp256k1_context_preallocated_destroy must + * be used instead. + * + * Args: ctx: an existing context to destroy, constructed using + * secp256k1_context_create or secp256k1_context_clone */ SECP256K1_API void secp256k1_context_destroy( secp256k1_context* ctx @@ -229,11 +247,28 @@ SECP256K1_API void secp256k1_context_destroy( * to cause a crash, though its return value and output arguments are * undefined. * + * When this function has not been called (or called with fun==NULL), then the + * default handler will be used. The library provides a default handler which + * writes the message to stderr and calls abort. This default handler can be + * replaced at link time if the preprocessor macro + * USE_EXTERNAL_DEFAULT_CALLBACKS is defined, which is the case if the build + * has been configured with --enable-external-default-callbacks. Then the + * following two symbols must be provided to link against: + * - void secp256k1_default_illegal_callback_fn(const char* message, void* data); + * - void secp256k1_default_error_callback_fn(const char* message, void* data); + * The library can call these default handlers even before a proper callback data + * pointer could have been set using secp256k1_context_set_illegal_callback or + * secp256k1_context_set_error_callback, e.g., when the creation of a context + * fails. In this case, the corresponding default handler will be called with + * the data pointer argument set to NULL. + * * Args: ctx: an existing context object (cannot be NULL) * In: fun: a pointer to a function to call when an illegal argument is - * passed to the API, taking a message and an opaque pointer - * (NULL restores a default handler that calls abort). + * passed to the API, taking a message and an opaque pointer. + * (NULL restores the default handler.) * data: the opaque pointer to pass to fun above. + * + * See also secp256k1_context_set_error_callback. */ SECP256K1_API void secp256k1_context_set_illegal_callback( secp256k1_context* ctx, @@ -253,9 +288,12 @@ SECP256K1_API void secp256k1_context_set_illegal_callback( * * Args: ctx: an existing context object (cannot be NULL) * In: fun: a pointer to a function to call when an internal error occurs, - * taking a message and an opaque pointer (NULL restores a default - * handler that calls abort). + * taking a message and an opaque pointer (NULL restores the + * default handler, see secp256k1_context_set_illegal_callback + * for details). * data: the opaque pointer to pass to fun above. + * + * See also secp256k1_context_set_illegal_callback. */ SECP256K1_API void secp256k1_context_set_error_callback( secp256k1_context* ctx, @@ -267,21 +305,24 @@ SECP256K1_API void secp256k1_context_set_error_callback( * * Returns: a newly created scratch space. * Args: ctx: an existing context object (cannot be NULL) - * In: max_size: maximum amount of memory to allocate + * In: size: amount of memory to be available as scratch space. Some extra + * (<100 bytes) will be allocated for extra accounting. */ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT secp256k1_scratch_space* secp256k1_scratch_space_create( const secp256k1_context* ctx, - size_t max_size + size_t size ) SECP256K1_ARG_NONNULL(1); /** Destroy a secp256k1 scratch space. * * The pointer may not be used afterwards. - * Args: scratch: space to destroy + * Args: ctx: a secp256k1 context object. 
+ * scratch: space to destroy */ SECP256K1_API void secp256k1_scratch_space_destroy( + const secp256k1_context* ctx, secp256k1_scratch_space* scratch -); +) SECP256K1_ARG_NONNULL(1); /** Parse a variable-length public key into the pubkey object. * @@ -615,7 +656,7 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul( ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Updates the context randomization to protect against side-channel leakage. - * Returns: 1: randomization successfully updated + * Returns: 1: randomization successfully updated or nothing to randomize * 0: error * Args: ctx: pointer to a context object (cannot be NULL) * In: seed32: pointer to a 32-byte random seed (NULL resets to initial state) @@ -630,8 +671,14 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_ec_pubkey_tweak_mul( * that it does not affect function results, but shields against attacks which * rely on any input-dependent behaviour. * + * This function currently has an effect only on contexts initialized for signing + * because randomization is currently used only for signing. However, this is not + * guaranteed and may change in the future. It is safe to call this function on + * contexts not initialized for signing; then it will have no effect and return 1. + * + * You should call this after secp256k1_context_create or - * secp256k1_context_clone, and may call this repeatedly afterwards. + * secp256k1_context_clone (and secp256k1_context_preallocated_create or + * secp256k1_context_preallocated_clone, resp.), and you may call this repeatedly afterwards. */ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int secp256k1_context_randomize( secp256k1_context* ctx, diff --git a/depend/secp256k1/include/secp256k1_preallocated.h b/depend/secp256k1/include/secp256k1_preallocated.h new file mode 100644 index 000000000..0fb64a543 --- /dev/null +++ b/depend/secp256k1/include/secp256k1_preallocated.h @@ -0,0 +1,128 @@ +#ifndef SECP256K1_PREALLOCATED_H +#define SECP256K1_PREALLOCATED_H + +#include "secp256k1.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* The module provided by this header file is intended for settings in which it + * is not possible or desirable to rely on dynamic memory allocation. It provides + * functions for creating, cloning, and destroying secp256k1 context objects in a + * contiguous fixed-size block of memory provided by the caller. + * + * Context objects created by functions in this module can be used like context + * objects created by functions in secp256k1.h, i.e., they can be passed to any + * API function that expects a context object (see secp256k1.h for details). The + * only exception is that context objects created by functions in this module + * must be destroyed using secp256k1_context_preallocated_destroy (in this + * module) instead of secp256k1_context_destroy (in secp256k1.h). + * + * It is guaranteed that functions in this module will not call malloc or its + * friends realloc, calloc, and free. + */ + +/** Determine the memory size of a secp256k1 context object to be created in + * caller-provided memory. + * + * The purpose of this function is to determine how much memory must be provided + * to secp256k1_context_preallocated_create. + * + * Returns: the required size of the caller-provided memory block + * In: flags: which parts of the context to initialize. 
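+ *
+ * As a hedged sketch of the intended call sequence (malloc/free appear
+ * purely for illustration, any suitably aligned caller-provided block
+ * works; error handling omitted):
+ *
+ *     size_t sz = secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN);
+ *     void *mem = malloc(sz);
+ *     secp256k1_context *ctx =
+ *         secp256k1_context_preallocated_create(mem, SECP256K1_CONTEXT_SIGN);
+ *     ...use ctx...
+ *     secp256k1_context_preallocated_destroy(ctx);
+ *     free(mem);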
+ */ +SECP256K1_API size_t secp256k1_context_preallocated_size( + unsigned int flags +) SECP256K1_WARN_UNUSED_RESULT; + +/** Create a secp256k1 context object in caller-provided memory. + * + * The caller must provide a pointer to a rewritable contiguous block of memory + * of size at least secp256k1_context_preallocated_size(flags) bytes, suitably + * aligned to hold an object of any type. + * + * The block of memory is exclusively owned by the created context object during + * the lifetime of this context object, which begins with the call to this + * function and ends when a call to secp256k1_context_preallocated_destroy + * (which destroys the context object again) returns. During the lifetime of the + * context object, the caller is obligated not to access this block of memory, + * i.e., the caller may not read or write the memory, e.g., by copying the memory + * contents to a different location or trying to create a second context object + * in the memory. In simpler words, the prealloc pointer (or any pointer derived + * from it) should not be used during the lifetime of the context object. + * + * Returns: a newly created context object. + * In: prealloc: a pointer to a rewritable contiguous block of memory of + * size at least secp256k1_context_preallocated_size(flags) + * bytes, as detailed above (cannot be NULL) + * flags: which parts of the context to initialize. + * + * See also secp256k1_context_randomize (in secp256k1.h) + * and secp256k1_context_preallocated_destroy. + */ +SECP256K1_API secp256k1_context* secp256k1_context_preallocated_create( + void* prealloc, + unsigned int flags +) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; + +/** Determine the memory size of a secp256k1 context object to be copied into + * caller-provided memory. + * + * Returns: the required size of the caller-provided memory block. + * In: ctx: an existing context to copy (cannot be NULL) + */ +SECP256K1_API size_t secp256k1_context_preallocated_clone_size( + const secp256k1_context* ctx +) SECP256K1_ARG_NONNULL(1) SECP256K1_WARN_UNUSED_RESULT; + +/** Copy a secp256k1 context object into caller-provided memory. + * + * The caller must provide a pointer to a rewritable contiguous block of memory + * of size at least secp256k1_context_preallocated_clone_size(ctx) bytes, suitably + * aligned to hold an object of any type. + * + * The block of memory is exclusively owned by the created context object during + * the lifetime of this context object, see the description of + * secp256k1_context_preallocated_create for details. + * + * Returns: a newly created context object. + * Args: ctx: an existing context to copy (cannot be NULL) + * In: prealloc: a pointer to a rewritable contiguous block of memory of + * size at least secp256k1_context_preallocated_clone_size(ctx) + * bytes, as detailed above (cannot be NULL) + */ +SECP256K1_API secp256k1_context* secp256k1_context_preallocated_clone( + const secp256k1_context* ctx, + void* prealloc +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_WARN_UNUSED_RESULT; + +/** Destroy a secp256k1 context object that has been created in + * caller-provided memory. + * + * The context pointer may not be used afterwards. + * + * The context to destroy must have been created using + * secp256k1_context_preallocated_create or secp256k1_context_preallocated_clone. + * If the context has instead been created using secp256k1_context_create or + * secp256k1_context_clone, the behaviour is undefined. 
In that case, + * secp256k1_context_destroy must be used instead. + * + * If required, it is the responsibility of the caller to deallocate the block + * of memory properly after this function returns, e.g., by calling free on the + * preallocated pointer given to secp256k1_context_preallocated_create or + * secp256k1_context_preallocated_clone. + * + * Args: ctx: an existing context to destroy, constructed using + * secp256k1_context_preallocated_create or + * secp256k1_context_preallocated_clone (cannot be NULL) + */ +SECP256K1_API void secp256k1_context_preallocated_destroy( + secp256k1_context* ctx +); + +#ifdef __cplusplus +} +#endif + +#endif /* SECP256K1_PREALLOCATED_H */ diff --git a/depend/secp256k1/libsecp256k1.pc.in b/depend/secp256k1/libsecp256k1.pc.in index a0d006f11..694e98eef 100644 --- a/depend/secp256k1/libsecp256k1.pc.in +++ b/depend/secp256k1/libsecp256k1.pc.in @@ -8,6 +8,6 @@ Description: Optimized C library for EC operations on curve secp256k1 URL: https://github.com/bitcoin-core/secp256k1 Version: @PACKAGE_VERSION@ Cflags: -I${includedir} -Libs.private: @SECP_LIBS@ Libs: -L${libdir} -lsecp256k1 +Libs.private: @SECP_LIBS@ diff --git a/depend/secp256k1/src/asm/field_10x26_arm.s b/depend/secp256k1/src/asm/field_10x26_arm.s index 5a9cc3ffc..9a5bd0672 100644 --- a/depend/secp256k1/src/asm/field_10x26_arm.s +++ b/depend/secp256k1/src/asm/field_10x26_arm.s @@ -16,15 +16,9 @@ Note: */ .syntax unified - .arch armv7-a @ eabi attributes - see readelf -A - .eabi_attribute 8, 1 @ Tag_ARM_ISA_use = yes - .eabi_attribute 9, 0 @ Tag_Thumb_ISA_use = no - .eabi_attribute 10, 0 @ Tag_FP_arch = none .eabi_attribute 24, 1 @ Tag_ABI_align_needed = 8-byte .eabi_attribute 25, 1 @ Tag_ABI_align_preserved = 8-byte, except leaf SP - .eabi_attribute 30, 2 @ Tag_ABI_optimization_goals = Aggressive Speed - .eabi_attribute 34, 1 @ Tag_CPU_unaligned_access = v6 .text @ Field constants diff --git a/depend/secp256k1/src/basic-config.h b/depend/secp256k1/src/basic-config.h index fc588061c..d29bd8e3e 100644 --- a/depend/secp256k1/src/basic-config.h +++ b/depend/secp256k1/src/basic-config.h @@ -10,7 +10,10 @@ #ifdef USE_BASIC_CONFIG #undef USE_ASM_X86_64 +#undef USE_ECMULT_STATIC_PRECOMPUTATION #undef USE_ENDOMORPHISM +#undef USE_EXTERNAL_ASM +#undef USE_EXTERNAL_DEFAULT_CALLBACKS #undef USE_FIELD_10X26 #undef USE_FIELD_5X52 #undef USE_FIELD_INV_BUILTIN @@ -27,6 +30,7 @@ #define USE_SCALAR_INV_BUILTIN 1 #define USE_FIELD_10X26 1 #define USE_SCALAR_8X32 1 +#define ECMULT_WINDOW_SIZE 4 #endif /* USE_BASIC_CONFIG */ diff --git a/depend/secp256k1/src/bench_ecmult.c b/depend/secp256k1/src/bench_ecmult.c index 52d0476a3..7b5d185dc 100644 --- a/depend/secp256k1/src/bench_ecmult.c +++ b/depend/secp256k1/src/bench_ecmult.c @@ -64,7 +64,7 @@ static void bench_ecmult(void* arg) { size_t iter; for (iter = 0; iter < iters; ++iter) { - data->ecmult_multi(&data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g); + data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? 
&data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g); data->offset1 = (data->offset1 + count) % POINTS; data->offset2 = (data->offset2 + count - 1) % POINTS; } @@ -139,6 +139,11 @@ int main(int argc, char **argv) { secp256k1_gej* pubkeys_gej; size_t scratch_size; + data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; + data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size); + data.ecmult_multi = secp256k1_ecmult_multi_var; + if (argc > 1) { if(have_flag(argc, argv, "pippenger_wnaf")) { printf("Using pippenger_wnaf:\n"); @@ -146,15 +151,19 @@ int main(int argc, char **argv) { } else if(have_flag(argc, argv, "strauss_wnaf")) { printf("Using strauss_wnaf:\n"); data.ecmult_multi = secp256k1_ecmult_strauss_batch_single; + } else if(have_flag(argc, argv, "simple")) { + printf("Using simple algorithm:\n"); + data.ecmult_multi = secp256k1_ecmult_multi_var; + secp256k1_scratch_space_destroy(data.ctx, data.scratch); + data.scratch = NULL; + } else { + fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]); + fprintf(stderr, "Use 'pippenger_wnaf', 'strauss_wnaf', 'simple' or no argument to benchmark a combined algorithm.\n"); + return 1; } - } else { - data.ecmult_multi = secp256k1_ecmult_multi_var; } /* Allocate stuff */ - data.ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - scratch_size = secp256k1_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; - data.scratch = secp256k1_scratch_space_create(data.ctx, scratch_size); data.scalars = malloc(sizeof(secp256k1_scalar) * POINTS); data.seckeys = malloc(sizeof(secp256k1_scalar) * POINTS); data.pubkeys = malloc(sizeof(secp256k1_ge) * POINTS); @@ -172,7 +181,7 @@ int main(int argc, char **argv) { secp256k1_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); } } - secp256k1_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS, &data.ctx->error_callback); + secp256k1_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS); free(pubkeys_gej); for (i = 1; i <= 8; ++i) { @@ -184,8 +193,10 @@ int main(int argc, char **argv) { run_test(&data, i << p, 1); } } + if (data.scratch != NULL) { + secp256k1_scratch_space_destroy(data.ctx, data.scratch); + } secp256k1_context_destroy(data.ctx); - secp256k1_scratch_space_destroy(data.scratch); free(data.scalars); free(data.pubkeys); free(data.seckeys); diff --git a/depend/secp256k1/src/bench_internal.c b/depend/secp256k1/src/bench_internal.c index 9c0a07fbb..a8f4e9e12 100644 --- a/depend/secp256k1/src/bench_internal.c +++ b/depend/secp256k1/src/bench_internal.c @@ -184,9 +184,11 @@ void bench_field_inverse_var(void* arg) { void bench_field_sqrt(void* arg) { int i; bench_inv *data = (bench_inv*)arg; + secp256k1_fe t; for (i = 0; i < 20000; i++) { - secp256k1_fe_sqrt(&data->fe_x, &data->fe_x); + t = data->fe_x; + secp256k1_fe_sqrt(&data->fe_x, &t); secp256k1_fe_add(&data->fe_x, &data->fe_y); } } @@ -251,7 +253,7 @@ void bench_wnaf_const(void* arg) { bench_inv *data = (bench_inv*)arg; for (i = 0; i < 20000; i++) { - secp256k1_wnaf_const(data->wnaf, data->scalar_x, WINDOW_A, 256); + secp256k1_wnaf_const(data->wnaf, &data->scalar_x, WINDOW_A, 256); secp256k1_scalar_add(&data->scalar_x, &data->scalar_x, &data->scalar_y); } } diff --git a/depend/secp256k1/src/ecdsa_impl.h b/depend/secp256k1/src/ecdsa_impl.h index c3400042d..eb099c87d 100644 --- a/depend/secp256k1/src/ecdsa_impl.h +++ 
b/depend/secp256k1/src/ecdsa_impl.h @@ -46,68 +46,73 @@ static const secp256k1_fe secp256k1_ecdsa_const_p_minus_order = SECP256K1_FE_CON 0, 0, 0, 1, 0x45512319UL, 0x50B75FC4UL, 0x402DA172UL, 0x2FC9BAEEUL ); -static int secp256k1_der_read_len(const unsigned char **sigp, const unsigned char *sigend) { - int lenleft, b1; - size_t ret = 0; +static int secp256k1_der_read_len(size_t *len, const unsigned char **sigp, const unsigned char *sigend) { + size_t lenleft; + unsigned char b1; + VERIFY_CHECK(len != NULL); + *len = 0; if (*sigp >= sigend) { - return -1; + return 0; } b1 = *((*sigp)++); if (b1 == 0xFF) { /* X.690-0207 8.1.3.5.c the value 0xFF shall not be used. */ - return -1; + return 0; } if ((b1 & 0x80) == 0) { /* X.690-0207 8.1.3.4 short form length octets */ - return b1; + *len = b1; + return 1; } if (b1 == 0x80) { /* Indefinite length is not allowed in DER. */ - return -1; + return 0; } /* X.690-0207 8.1.3.5 long form length octets */ - lenleft = b1 & 0x7F; - if (lenleft > sigend - *sigp) { - return -1; + lenleft = b1 & 0x7F; /* lenleft is at least 1 */ + if (lenleft > (size_t)(sigend - *sigp)) { + return 0; } if (**sigp == 0) { /* Not the shortest possible length encoding. */ - return -1; + return 0; } - if ((size_t)lenleft > sizeof(size_t)) { + if (lenleft > sizeof(size_t)) { /* The resulting length would exceed the range of a size_t, so * certainly longer than the passed array size. */ - return -1; + return 0; } while (lenleft > 0) { - ret = (ret << 8) | **sigp; - if (ret + lenleft > (size_t)(sigend - *sigp)) { - /* Result exceeds the length of the passed array. */ - return -1; - } + *len = (*len << 8) | **sigp; (*sigp)++; lenleft--; } - if (ret < 128) { + if (*len > (size_t)(sigend - *sigp)) { + /* Result exceeds the length of the passed array. */ + return 0; + } + if (*len < 128) { /* Not the shortest possible length encoding. */ - return -1; + return 0; } - return ret; + return 1; } static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char **sig, const unsigned char *sigend) { int overflow = 0; unsigned char ra[32] = {0}; - int rlen; + size_t rlen; if (*sig == sigend || **sig != 0x02) { /* Not a primitive integer (X.690-0207 8.3.1). */ return 0; } (*sig)++; - rlen = secp256k1_der_read_len(sig, sigend); - if (rlen <= 0 || (*sig) + rlen > sigend) { + if (secp256k1_der_read_len(&rlen, sig, sigend) == 0) { + return 0; + } + if (rlen == 0 || *sig + rlen > sigend) { /* Exceeds bounds or not at least length 1 (X.690-0207 8.3.1). */ return 0; } @@ -123,8 +128,11 @@ static int secp256k1_der_parse_integer(secp256k1_scalar *r, const unsigned char /* Negative. */ overflow = 1; } - while (rlen > 0 && **sig == 0) { - /* Skip leading zero bytes */ + /* There is at most one leading zero byte: + * if there were two leading zero bytes, we would have failed and returned 0 + * because of excessive 0x00 padding already. */ + if (rlen > 0 && **sig == 0) { + /* Skip leading zero byte */ rlen--; (*sig)++; } @@ -144,18 +152,16 @@ static int secp256k1_ecdsa_sig_parse(secp256k1_scalar *rr, secp256k1_scalar *rs, const unsigned char *sig, size_t size) { const unsigned char *sigend = sig + size; - int rlen; if (sig == sigend || *(sig++) != 0x30) { /* The encoding doesn't start with a constructed sequence (X.690-0207 8.9.1). 
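 * For example, a 72-byte signature encodes as 0x30 0x46 followed by the two INTEGERs: 0x30 is the constructed SEQUENCE tag and 0x46 = 70 is its short-form length; content lengths of 128 or more must use the long form, e.g. 0x81 0xC8 for 200 (illustrative byte values, not taken from the code above).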
*/ return 0; } - rlen = secp256k1_der_read_len(&sig, sigend); - if (rlen < 0 || sig + rlen > sigend) { - /* Tuple exceeds bounds */ + if (secp256k1_der_read_len(&rlen, &sig, sigend) == 0) { return 0; } - if (sig + rlen != sigend) { - /* Garbage after tuple. */ + if (rlen != (size_t)(sigend - sig)) { + /* Tuple exceeds bounds or garbage after tuple. */ return 0; } diff --git a/depend/secp256k1/src/eckey_impl.h b/depend/secp256k1/src/eckey_impl.h index 1ab9a68ec..7c5b78932 100644 --- a/depend/secp256k1/src/eckey_impl.h +++ b/depend/secp256k1/src/eckey_impl.h @@ -18,7 +18,7 @@ static int secp256k1_eckey_pubkey_parse(secp256k1_ge *elem, const unsigned char if (size == 33 && (pub[0] == SECP256K1_TAG_PUBKEY_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_ODD)) { secp256k1_fe x; return secp256k1_fe_set_b32(&x, pub+1) && secp256k1_ge_set_xo_var(elem, &x, pub[0] == SECP256K1_TAG_PUBKEY_ODD); - } else if (size == 65 && (pub[0] == 0x04 || pub[0] == 0x06 || pub[0] == 0x07)) { + } else if (size == 65 && (pub[0] == SECP256K1_TAG_PUBKEY_UNCOMPRESSED || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_EVEN || pub[0] == SECP256K1_TAG_PUBKEY_HYBRID_ODD)) { secp256k1_fe x, y; if (!secp256k1_fe_set_b32(&x, pub+1) || !secp256k1_fe_set_b32(&y, pub+33)) { return 0; diff --git a/depend/secp256k1/src/ecmult.h b/depend/secp256k1/src/ecmult.h index ea1cd8a21..c9b198239 100644 --- a/depend/secp256k1/src/ecmult.h +++ b/depend/secp256k1/src/ecmult.h @@ -20,10 +20,10 @@ typedef struct { #endif } secp256k1_ecmult_context; +static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx); -static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb); -static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst, - const secp256k1_ecmult_context *src, const secp256k1_callback *cb); +static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc); +static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src); static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx); static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx); @@ -37,11 +37,12 @@ typedef int (secp256k1_ecmult_multi_callback)(secp256k1_scalar *sc, secp256k1_ge * Chooses the right algorithm for a given number of points and scratch space * size. Resets and overwrites the given scratch space. If the points do not * fit in the scratch space the algorithm is repeatedly run with batches of - * points. + * points. If no scratch space is given then a simple algorithm is used that + * simply multiplies the points with the corresponding scalars and adds them up. 
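+ * In other words, on success r is set to inp_g_sc*G + sum(sc_i*pt_i, i=0..n-1), where + * the (sc_i, pt_i) pairs are supplied by the callback cb.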
* Returns: 1 on success (including when inp_g_sc is NULL and n is 0) * 0 if there is not enough scratch space for a single point or * callback returns 0 */ -static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n); +static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n); #endif /* SECP256K1_ECMULT_H */ diff --git a/depend/secp256k1/src/ecmult_const_impl.h b/depend/secp256k1/src/ecmult_const_impl.h index 8411752eb..aaa576ada 100644 --- a/depend/secp256k1/src/ecmult_const_impl.h +++ b/depend/secp256k1/src/ecmult_const_impl.h @@ -48,7 +48,7 @@ * * Numbers reference steps of `Algorithm SPA-resistant Width-w NAF with Odd Scalar` on pp. 335 */ -static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) { +static int secp256k1_wnaf_const(int *wnaf, const secp256k1_scalar *scalar, int w, int size) { int global_sign; int skew = 0; int word = 0; @@ -59,8 +59,12 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) int flip; int bit; - secp256k1_scalar neg_s; + secp256k1_scalar s; int not_neg_one; + + VERIFY_CHECK(w > 0); + VERIFY_CHECK(size > 0); + /* Note that we cannot handle even numbers by negating them to be odd, as is * done in other implementations, since if our scalars were specified to have * width < 256 for performance reasons, their negations would have width 256 @@ -75,12 +79,13 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) * {1, 2} we want to add to the scalar when ensuring that it's odd. Further * complicating things, -1 interacts badly with `secp256k1_scalar_cadd_bit` and * we need to special-case it in this logic. */ - flip = secp256k1_scalar_is_high(&s); + flip = secp256k1_scalar_is_high(scalar); /* We add 1 to even numbers, 2 to odd ones, noting that negation flips parity */ - bit = flip ^ !secp256k1_scalar_is_even(&s); + bit = flip ^ !secp256k1_scalar_is_even(scalar); /* We check for negative one, since adding 2 to it will cause an overflow */ - secp256k1_scalar_negate(&neg_s, &s); - not_neg_one = !secp256k1_scalar_is_one(&neg_s); + secp256k1_scalar_negate(&s, scalar); + not_neg_one = !secp256k1_scalar_is_one(&s); + s = *scalar; secp256k1_scalar_cadd_bit(&s, bit, not_neg_one); /* If we had negative one, flip == 1, s.d[0] == 0, bit == 1, so caller expects * that we added two to it and flipped it. In fact for -1 these operations are @@ -93,7 +98,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) /* 4 */ u_last = secp256k1_scalar_shr_int(&s, w); - while (word * w < size) { + do { int sign; int even; @@ -109,7 +114,7 @@ static int secp256k1_wnaf_const(int *wnaf, secp256k1_scalar s, int w, int size) wnaf[word++] = u_last * global_sign; u_last = u; - } + } while (word * w < size); wnaf[word] = u * global_sign; VERIFY_CHECK(secp256k1_scalar_is_zero(&s)); @@ -132,7 +137,6 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons int wnaf_1[1 + WNAF_SIZE(WINDOW_A - 1)]; int i; - secp256k1_scalar sc = *scalar; /* build wnaf representation for q. 
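 * (q denotes the scalar argument; with the endomorphism enabled it is split below into two ~128-bit parts q_1 and q_lam with q = q_1 + q_lam*lambda, each getting its own wNAF.)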
*/ int rsize = size; @@ -140,13 +144,13 @@ static void secp256k1_ecmult_const(secp256k1_gej *r, const secp256k1_ge *a, cons if (size > 128) { rsize = 128; /* split q into q_1 and q_lam (where q = q_1 + q_lam*lambda, and q_1 and q_lam are ~128 bit) */ - secp256k1_scalar_split_lambda(&q_1, &q_lam, &sc); - skew_1 = secp256k1_wnaf_const(wnaf_1, q_1, WINDOW_A - 1, 128); - skew_lam = secp256k1_wnaf_const(wnaf_lam, q_lam, WINDOW_A - 1, 128); + secp256k1_scalar_split_lambda(&q_1, &q_lam, scalar); + skew_1 = secp256k1_wnaf_const(wnaf_1, &q_1, WINDOW_A - 1, 128); + skew_lam = secp256k1_wnaf_const(wnaf_lam, &q_lam, WINDOW_A - 1, 128); } else #endif { - skew_1 = secp256k1_wnaf_const(wnaf_1, sc, WINDOW_A - 1, size); + skew_1 = secp256k1_wnaf_const(wnaf_1, scalar, WINDOW_A - 1, size); #ifdef USE_ENDOMORPHISM skew_lam = 0; #endif diff --git a/depend/secp256k1/src/ecmult_gen.h b/depend/secp256k1/src/ecmult_gen.h index 7564b7015..5364cddef 100644 --- a/depend/secp256k1/src/ecmult_gen.h +++ b/depend/secp256k1/src/ecmult_gen.h @@ -10,28 +10,35 @@ #include "scalar.h" #include "group.h" +#ifndef ECMULT_GEN_PREC_BITS +#define ECMULT_GEN_PREC_BITS 4 /* 4 bits: 64kB table, fastest; 2 bits: 32kB table, ~75% speed */ +#endif +#define ECMULT_GEN_PREC_B ECMULT_GEN_PREC_BITS +#define ECMULT_GEN_PREC_G (1 << ECMULT_GEN_PREC_B) +#define ECMULT_GEN_PREC_N (256 / ECMULT_GEN_PREC_B) + typedef struct { /* For accelerating the computation of a*G: * To harden against timing attacks, use the following mechanism: - * * Break up the multiplicand into groups of 4 bits, called n_0, n_1, n_2, ..., n_63. - * * Compute sum(n_i * 16^i * G + U_i, i=0..63), where: - * * U_i = U * 2^i (for i=0..62) - * * U_i = U * (1-2^63) (for i=63) - * where U is a point with no known corresponding scalar. Note that sum(U_i, i=0..63) = 0. - * For each i, and each of the 16 possible values of n_i, (n_i * 16^i * G + U_i) is - * precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0..63). + * * Break up the multiplicand into groups of PREC_B bits, called n_0, n_1, n_2, ..., n_(PREC_N-1). + * * Compute sum(n_i * (PREC_G)^i * G + U_i, i=0 ... PREC_N-1), where: + * * U_i = U * 2^i, for i=0 ... PREC_N-2 + * * U_i = U * (1-2^(PREC_N-1)), for i=PREC_N-1 + * where U is a point with no known corresponding scalar. Note that sum(U_i, i=0 ... PREC_N-1) = 0. + * For each i, and each of the PREC_G possible values of n_i, (n_i * (PREC_G)^i * G + U_i) is + * precomputed (call it prec(i, n_i)). The formula now becomes sum(prec(i, n_i), i=0 ... PREC_N-1). * None of the resulting prec group elements have a known scalar, and neither do any of * the intermediate sums while computing a*G. 
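+ * As a concrete illustration, with the default ECMULT_GEN_PREC_BITS = 4 this reduces to the + * original scheme: PREC_B = 4, PREC_G = 16 and PREC_N = 64, i.e. a 64 x 16 table of + * precomputed points (64 kB in storage form) and 64 constant-time table lookups and + * additions per a*G computation.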
*/ - secp256k1_ge_storage (*prec)[64][16]; /* prec[j][i] = 16^j * i * G + U_i */ + secp256k1_ge_storage (*prec)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G]; /* prec[j][i] = (PREC_G)^j * i * G + U_i */ secp256k1_scalar blind; secp256k1_gej initial; } secp256k1_ecmult_gen_context; +static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context* ctx); -static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, const secp256k1_callback* cb); -static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst, - const secp256k1_ecmult_gen_context* src, const secp256k1_callback* cb); +static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context* ctx, void **prealloc); +static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context* src); static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context* ctx); static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_context* ctx); diff --git a/depend/secp256k1/src/ecmult_gen_impl.h b/depend/secp256k1/src/ecmult_gen_impl.h index 714f02e94..0de0742c6 100644 --- a/depend/secp256k1/src/ecmult_gen_impl.h +++ b/depend/secp256k1/src/ecmult_gen_impl.h @@ -7,6 +7,7 @@ #ifndef SECP256K1_ECMULT_GEN_IMPL_H #define SECP256K1_ECMULT_GEN_IMPL_H +#include "util.h" #include "scalar.h" #include "group.h" #include "ecmult_gen.h" @@ -14,23 +15,32 @@ #ifdef USE_ECMULT_STATIC_PRECOMPUTATION #include "ecmult_static_context.h" #endif + +#ifndef USE_ECMULT_STATIC_PRECOMPUTATION + static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = ROUND_TO_ALIGN(sizeof(*((secp256k1_ecmult_gen_context*) NULL)->prec)); +#else + static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE = 0; +#endif + static void secp256k1_ecmult_gen_context_init(secp256k1_ecmult_gen_context *ctx) { ctx->prec = NULL; } -static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, const secp256k1_callback* cb) { +static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx, void **prealloc) { #ifndef USE_ECMULT_STATIC_PRECOMPUTATION - secp256k1_ge prec[1024]; + secp256k1_ge prec[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; secp256k1_gej gj; secp256k1_gej nums_gej; int i, j; + size_t const prealloc_size = SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; + void* const base = *prealloc; #endif if (ctx->prec != NULL) { return; } #ifndef USE_ECMULT_STATIC_PRECOMPUTATION - ctx->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*ctx->prec)); + ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])manual_alloc(prealloc, prealloc_size, base, prealloc_size); /* get the generator */ secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); @@ -54,39 +64,39 @@ static void secp256k1_ecmult_gen_context_build(secp256k1_ecmult_gen_context *ctx /* compute prec. */ { - secp256k1_gej precj[1024]; /* Jacobian versions of prec. */ + secp256k1_gej precj[ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G]; /* Jacobian versions of prec. */ secp256k1_gej gbase; secp256k1_gej numsbase; - gbase = gj; /* 16^j * G */ + gbase = gj; /* PREC_G^j * G */ numsbase = nums_gej; /* 2^j * nums. */ - for (j = 0; j < 64; j++) { - /* Set precj[j*16 .. j*16+15] to (numsbase, numsbase + gbase, ..., numsbase + 15*gbase). 
*/ - precj[j*16] = numsbase; - for (i = 1; i < 16; i++) { - secp256k1_gej_add_var(&precj[j*16 + i], &precj[j*16 + i - 1], &gbase, NULL); + for (j = 0; j < ECMULT_GEN_PREC_N; j++) { + /* Set precj[j*PREC_G .. j*PREC_G+(PREC_G-1)] to (numsbase, numsbase + gbase, ..., numsbase + (PREC_G-1)*gbase). */ + precj[j*ECMULT_GEN_PREC_G] = numsbase; + for (i = 1; i < ECMULT_GEN_PREC_G; i++) { + secp256k1_gej_add_var(&precj[j*ECMULT_GEN_PREC_G + i], &precj[j*ECMULT_GEN_PREC_G + i - 1], &gbase, NULL); } - /* Multiply gbase by 16. */ - for (i = 0; i < 4; i++) { + /* Multiply gbase by PREC_G. */ + for (i = 0; i < ECMULT_GEN_PREC_B; i++) { secp256k1_gej_double_var(&gbase, &gbase, NULL); } /* Multiply numbase by 2. */ secp256k1_gej_double_var(&numsbase, &numsbase, NULL); - if (j == 62) { + if (j == ECMULT_GEN_PREC_N - 2) { /* In the last iteration, numsbase is (1 - 2^j) * nums instead. */ secp256k1_gej_neg(&numsbase, &numsbase); secp256k1_gej_add_var(&numsbase, &numsbase, &nums_gej, NULL); } } - secp256k1_ge_set_all_gej_var(prec, precj, 1024, cb); + secp256k1_ge_set_all_gej_var(prec, precj, ECMULT_GEN_PREC_N * ECMULT_GEN_PREC_G); } - for (j = 0; j < 64; j++) { - for (i = 0; i < 16; i++) { - secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*16 + i]); + for (j = 0; j < ECMULT_GEN_PREC_N; j++) { + for (i = 0; i < ECMULT_GEN_PREC_G; i++) { + secp256k1_ge_to_storage(&(*ctx->prec)[j][i], &prec[j*ECMULT_GEN_PREC_G + i]); } } #else - (void)cb; - ctx->prec = (secp256k1_ge_storage (*)[64][16])secp256k1_ecmult_static_context; + (void)prealloc; + ctx->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])secp256k1_ecmult_static_context; #endif secp256k1_ecmult_gen_blind(ctx, NULL); } @@ -95,27 +105,18 @@ static int secp256k1_ecmult_gen_context_is_built(const secp256k1_ecmult_gen_cont return ctx->prec != NULL; } -static void secp256k1_ecmult_gen_context_clone(secp256k1_ecmult_gen_context *dst, - const secp256k1_ecmult_gen_context *src, const secp256k1_callback* cb) { - if (src->prec == NULL) { - dst->prec = NULL; - } else { +static void secp256k1_ecmult_gen_context_finalize_memcpy(secp256k1_ecmult_gen_context *dst, const secp256k1_ecmult_gen_context *src) { #ifndef USE_ECMULT_STATIC_PRECOMPUTATION - dst->prec = (secp256k1_ge_storage (*)[64][16])checked_malloc(cb, sizeof(*dst->prec)); - memcpy(dst->prec, src->prec, sizeof(*dst->prec)); + if (src->prec != NULL) { + /* We cast to void* first to suppress a -Wcast-align warning. */ + dst->prec = (secp256k1_ge_storage (*)[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G])(void*)((unsigned char*)dst + ((unsigned char*)src->prec - (unsigned char*)src)); + } #else - (void)cb; - dst->prec = src->prec; + (void)dst, (void)src; #endif - dst->initial = src->initial; - dst->blind = src->blind; - } } static void secp256k1_ecmult_gen_context_clear(secp256k1_ecmult_gen_context *ctx) { -#ifndef USE_ECMULT_STATIC_PRECOMPUTATION - free(ctx->prec); -#endif secp256k1_scalar_clear(&ctx->blind); secp256k1_gej_clear(&ctx->initial); ctx->prec = NULL; @@ -132,9 +133,9 @@ static void secp256k1_ecmult_gen(const secp256k1_ecmult_gen_context *ctx, secp25 /* Blind scalar/point multiplication by computing (n-b)G + bG instead of nG. 
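 * Concretely, secp256k1_ecmult_gen_blind stores -b in ctx->blind and b*G in ctx->initial for a random b; the accumulator starts at ctx->initial and the loop below adds (n + ctx->blind)*G = (n-b)*G, so the bit pattern driving the table lookups is the blinded value n-b rather than the raw secret n.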
*/ secp256k1_scalar_add(&gnb, gn, &ctx->blind); add.infinity = 0; - for (j = 0; j < 64; j++) { - bits = secp256k1_scalar_get_bits(&gnb, j * 4, 4); - for (i = 0; i < 16; i++) { + for (j = 0; j < ECMULT_GEN_PREC_N; j++) { + bits = secp256k1_scalar_get_bits(&gnb, j * ECMULT_GEN_PREC_B, ECMULT_GEN_PREC_B); + for (i = 0; i < ECMULT_GEN_PREC_G; i++) { /** This uses a conditional move to avoid any secret data in array indexes. * _Any_ use of secret indexes has been demonstrated to result in timing * sidechannels, even when the cache-line access patterns are uniform. diff --git a/depend/secp256k1/src/ecmult_impl.h b/depend/secp256k1/src/ecmult_impl.h index d5fb6c5b6..bb7adedfb 100644 --- a/depend/secp256k1/src/ecmult_impl.h +++ b/depend/secp256k1/src/ecmult_impl.h @@ -10,6 +10,7 @@ #include <string.h> #include <stdint.h> +#include "util.h" #include "group.h" #include "scalar.h" #include "ecmult.h" @@ -30,16 +31,32 @@ # endif #else /* optimal for 128-bit and 256-bit exponents. */ -#define WINDOW_A 5 -/** larger numbers may result in slightly better performance, at the cost of exponentially larger precomputed tables. */ -#ifdef USE_ENDOMORPHISM -/** Two tables for window size 15: 1.375 MiB. */ -#define WINDOW_G 15 -#else -/** One table for window size 16: 1.375 MiB. */ -#define WINDOW_G 16 +# define WINDOW_A 5 +/** Larger values for ECMULT_WINDOW_SIZE result in possibly better + * performance at the cost of an exponentially larger precomputed + * table. The exact table size is + * (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage) bytes, + * where sizeof(secp256k1_ge_storage) is typically 64 bytes but can + * be larger due to platform-specific padding and alignment. + * If the endomorphism optimization is enabled (USE_ENDOMORPHISM) + * two tables of this size are used instead of only one. + */ +# define WINDOW_G ECMULT_WINDOW_SIZE #endif + +/* No one will ever need more than a window size of 24. The code might + * be correct for larger values of ECMULT_WINDOW_SIZE but this is not + * tested. + * + * The following limitations are known, and there are probably more: + * If WINDOW_G > 27 and size_t has 32 bits, then the code is incorrect + * because the size of the memory object that we allocate (in bytes) + * will not fit in a size_t. + * If WINDOW_G > 31 and int has 32 bits, then the code is incorrect + * because certain expressions will overflow. + */ +#if ECMULT_WINDOW_SIZE < 2 || ECMULT_WINDOW_SIZE > 24 +# error Set ECMULT_WINDOW_SIZE to an integer in range [2..24]. #endif #ifdef USE_ENDOMORPHISM @@ -137,24 +154,135 @@ static void secp256k1_ecmult_odd_multiples_table_globalz_windowa(secp256k1_ge *p secp256k1_ge_globalz_set_table_gej(ECMULT_TABLE_SIZE(WINDOW_A), pre, globalz, prej, zr); } -static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge_storage *pre, const secp256k1_gej *a, const secp256k1_callback *cb) { - secp256k1_gej *prej = (secp256k1_gej*)checked_malloc(cb, sizeof(secp256k1_gej) * n); - secp256k1_ge *prea = (secp256k1_ge*)checked_malloc(cb, sizeof(secp256k1_ge) * n); - secp256k1_fe *zr = (secp256k1_fe*)checked_malloc(cb, sizeof(secp256k1_fe) * n); +static void secp256k1_ecmult_odd_multiples_table_storage_var(const int n, secp256k1_ge_storage *pre, const secp256k1_gej *a) { + secp256k1_gej d; + secp256k1_ge d_ge, p_ge; + secp256k1_gej pj; + secp256k1_fe zi; + secp256k1_fe zr; + secp256k1_fe dx_over_dz_squared; int i; - /* Compute the odd multiples in Jacobian form. */ - secp256k1_ecmult_odd_multiples_table(n, prej, zr, a); - /* Convert them in batch to affine coordinates. 
*/ - secp256k1_ge_set_table_gej_var(prea, prej, zr, n); - /* Convert them to compact storage form. */ - for (i = 0; i < n; i++) { - secp256k1_ge_to_storage(&pre[i], &prea[i]); + VERIFY_CHECK(!a->infinity); + + secp256k1_gej_double_var(&d, a, NULL); + + /* First, we perform all the additions in an isomorphic curve obtained by multiplying + * all `z` coordinates by 1/`d.z`. In these coordinates `d` is affine so we can use + * `secp256k1_gej_add_ge_var` to perform the additions. For each addition, we store + * the resulting y-coordinate and the z-ratio, since we only have enough memory to + * store two field elements. These are sufficient to efficiently undo the isomorphism + * and recompute all the `x`s. + */ + d_ge.x = d.x; + d_ge.y = d.y; + d_ge.infinity = 0; + + secp256k1_ge_set_gej_zinv(&p_ge, a, &d.z); + pj.x = p_ge.x; + pj.y = p_ge.y; + pj.z = a->z; + pj.infinity = 0; + + for (i = 0; i < (n - 1); i++) { + secp256k1_fe_normalize_var(&pj.y); + secp256k1_fe_to_storage(&pre[i].y, &pj.y); + secp256k1_gej_add_ge_var(&pj, &pj, &d_ge, &zr); + secp256k1_fe_normalize_var(&zr); + secp256k1_fe_to_storage(&pre[i].x, &zr); } - free(prea); - free(prej); - free(zr); + /* Invert d.z in the same batch, preserving pj.z so we can extract 1/d.z */ + secp256k1_fe_mul(&zi, &pj.z, &d.z); + secp256k1_fe_inv_var(&zi, &zi); + + /* Directly set `pre[n - 1]` to `pj`, saving the inverted z-coordinate so + * that we can combine it with the saved z-ratios to compute the other zs + * without any more inversions. */ + secp256k1_ge_set_gej_zinv(&p_ge, &pj, &zi); + secp256k1_ge_to_storage(&pre[n - 1], &p_ge); + + /* Compute the actual x-coordinate of D, which will be needed below. */ + secp256k1_fe_mul(&d.z, &zi, &pj.z); /* d.z = 1/d.z */ + secp256k1_fe_sqr(&dx_over_dz_squared, &d.z); + secp256k1_fe_mul(&dx_over_dz_squared, &dx_over_dz_squared, &d.x); + + /* Going into the second loop, we have set `pre[n-1]` to its final affine + * form, but still need to set `pre[i]` for `i` in 0 through `n-2`. We + * have `zi = (p.z * d.z)^-1`, where + * + * `p.z` is the z-coordinate of the point on the isomorphic curve + * which was ultimately assigned to `pre[n-1]`. + * `d.z` is the multiplier that must be applied to all z-coordinates + * to move from our isomorphic curve back to secp256k1; so the + * product `p.z * d.z` is the z-coordinate of the secp256k1 + * point assigned to `pre[n-1]`. + * + * All subsequent inverse-z-coordinates can be obtained by multiplying this + * factor by successive z-ratios, which is much more efficient than directly + * computing each one. + * + * Importantly, these inverse-zs will be coordinates of points on secp256k1, + * while our other stored values come from computations on the isomorphic + * curve. So in the below loop, we will take care not to actually use `zi` + * or any derived values until we're back on secp256k1. + */ + i = n - 1; + while (i > 0) { + secp256k1_fe zi2, zi3; + const secp256k1_fe *rzr; + i--; + + secp256k1_ge_from_storage(&p_ge, &pre[i]); + + /* For each remaining point, we extract the z-ratio from the stored + * x-coordinate, compute its z^-1 from that, and compute the full + * point from that. */ + rzr = &p_ge.x; + secp256k1_fe_mul(&zi, &zi, rzr); + secp256k1_fe_sqr(&zi2, &zi); + secp256k1_fe_mul(&zi3, &zi2, &zi); + /* To compute the actual x-coordinate, we use the stored z ratio and + * y-coordinate, which we obtained from `secp256k1_gej_add_ge_var` + * in the loop above, as well as the inverse of the square of its + * z-coordinate. 
We store the latter in the `zi2` variable, which is + * computed iteratively starting from the overall Z inverse then + * multiplying by each z-ratio in turn. + * + * Denoting the z-ratio as `rzr`, we observe that it is equal to `h` + * from the inside of the above `gej_add_ge_var` call. This satisfies + * + * rzr = d_x * z^2 - x * d_z^2 + * + * where (`d_x`, `d_z`) are Jacobian coordinates of `D` and `(x, z)` + * are Jacobian coordinates of our desired point -- except both are on + * the isomorphic curve that we were using when we called `gej_add_ge_var`. + * To get back to secp256k1, we must multiply both `z`s by `d_z`, or + * equivalently divide both `x`s by `d_z^2`. Our equation then becomes + * + * rzr = d_x * z^2 / d_z^2 - x + * + * (The left-hand-side, being a ratio of z-coordinates, is unaffected + * by the isomorphism.) + * + * Rearranging to solve for `x`, we have + * + * x = d_x * z^2 / d_z^2 - rzr + * + * But what we actually want is the affine coordinate `X = x/z^2`, + * which will satisfy + * + * X = d_x / d_z^2 - rzr / z^2 + * = dx_over_dz_squared - rzr * zi2 + */ + secp256k1_fe_mul(&p_ge.x, rzr, &zi2); + secp256k1_fe_negate(&p_ge.x, &p_ge.x, 1); + secp256k1_fe_add(&p_ge.x, &dx_over_dz_squared); + /* y is stored_y/z^3, as we expect */ + secp256k1_fe_mul(&p_ge.y, &p_ge.y, &zi3); + /* Store */ + secp256k1_ge_to_storage(&pre[i], &p_ge); + } } /** The following two macros retrieve a particular odd multiple from a table @@ -166,7 +294,8 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge if ((n) > 0) { \ *(r) = (pre)[((n)-1)/2]; \ } else { \ - secp256k1_ge_neg((r), &(pre)[(-(n)-1)/2]); \ + *(r) = (pre)[(-(n)-1)/2]; \ + secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) @@ -178,10 +307,17 @@ static void secp256k1_ecmult_odd_multiples_table_storage_var(int n, secp256k1_ge secp256k1_ge_from_storage((r), &(pre)[((n)-1)/2]); \ } else { \ secp256k1_ge_from_storage((r), &(pre)[(-(n)-1)/2]); \ - secp256k1_ge_neg((r), (r)); \ + secp256k1_fe_negate(&((r)->y), &((r)->y), 1); \ } \ } while(0) +static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE = + ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) +#ifdef USE_ENDOMORPHISM + + ROUND_TO_ALIGN(sizeof((*((secp256k1_ecmult_context*) NULL)->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)) +#endif + ; + static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) { ctx->pre_g = NULL; #ifdef USE_ENDOMORPHISM @@ -189,8 +325,10 @@ static void secp256k1_ecmult_context_init(secp256k1_ecmult_context *ctx) { #endif } -static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const secp256k1_callback *cb) { +static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, void **prealloc) { secp256k1_gej gj; + void* const base = *prealloc; + size_t const prealloc_size = SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; if (ctx->pre_g != NULL) { return; @@ -199,44 +337,44 @@ static void secp256k1_ecmult_context_build(secp256k1_ecmult_context *ctx, const /* get the generator */ secp256k1_gej_set_ge(&gj, &secp256k1_ge_const_g); - ctx->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)); + { + size_t size = sizeof((*ctx->pre_g)[0]) * ((size_t)ECMULT_TABLE_SIZE(WINDOW_G)); + /* check for overflow */ + VERIFY_CHECK(size / sizeof((*ctx->pre_g)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); + ctx->pre_g = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, 
sizeof((*ctx->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); + } /* precompute the tables with odd multiples */ - secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj, cb); + secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g, &gj); #ifdef USE_ENDOMORPHISM { secp256k1_gej g_128j; int i; - ctx->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G)); + size_t size = sizeof((*ctx->pre_g_128)[0]) * ((size_t) ECMULT_TABLE_SIZE(WINDOW_G)); + /* check for overflow */ + VERIFY_CHECK(size / sizeof((*ctx->pre_g_128)[0]) == ((size_t)ECMULT_TABLE_SIZE(WINDOW_G))); + ctx->pre_g_128 = (secp256k1_ge_storage (*)[])manual_alloc(prealloc, sizeof((*ctx->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G), base, prealloc_size); /* calculate 2^128*generator */ g_128j = gj; for (i = 0; i < 128; i++) { secp256k1_gej_double_var(&g_128j, &g_128j, NULL); } - secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j, cb); + secp256k1_ecmult_odd_multiples_table_storage_var(ECMULT_TABLE_SIZE(WINDOW_G), *ctx->pre_g_128, &g_128j); } #endif } -static void secp256k1_ecmult_context_clone(secp256k1_ecmult_context *dst, - const secp256k1_ecmult_context *src, const secp256k1_callback *cb) { - if (src->pre_g == NULL) { - dst->pre_g = NULL; - } else { - size_t size = sizeof((*dst->pre_g)[0]) * ECMULT_TABLE_SIZE(WINDOW_G); - dst->pre_g = (secp256k1_ge_storage (*)[])checked_malloc(cb, size); - memcpy(dst->pre_g, src->pre_g, size); +static void secp256k1_ecmult_context_finalize_memcpy(secp256k1_ecmult_context *dst, const secp256k1_ecmult_context *src) { + if (src->pre_g != NULL) { + /* We cast to void* first to suppress a -Wcast-align warning. 
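(The destination context is a byte-for-byte copy of the source placed at a different address, so the table pointer is rebased by applying the source's internal byte offset to dst.)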
*/ + dst->pre_g = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g) - (unsigned char*)src)); } #ifdef USE_ENDOMORPHISM - if (src->pre_g_128 == NULL) { - dst->pre_g_128 = NULL; - } else { - size_t size = sizeof((*dst->pre_g_128)[0]) * ECMULT_TABLE_SIZE(WINDOW_G); - dst->pre_g_128 = (secp256k1_ge_storage (*)[])checked_malloc(cb, size); - memcpy(dst->pre_g_128, src->pre_g_128, size); + if (src->pre_g_128 != NULL) { + dst->pre_g_128 = (secp256k1_ge_storage (*)[])(void*)((unsigned char*)dst + ((unsigned char*)(src->pre_g_128) - (unsigned char*)src)); } #endif } @@ -246,10 +384,6 @@ static int secp256k1_ecmult_context_is_built(const secp256k1_ecmult_context *ctx } static void secp256k1_ecmult_context_clear(secp256k1_ecmult_context *ctx) { - free(ctx->pre_g); -#ifdef USE_ENDOMORPHISM - free(ctx->pre_g_128); -#endif secp256k1_ecmult_context_init(ctx); } @@ -306,7 +440,7 @@ static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, CHECK(carry == 0); while (bit < 256) { CHECK(secp256k1_scalar_get_bits(&s, bit++, 1) == 0); - } + } #endif return last_set_bit + 1; } @@ -514,52 +648,55 @@ static size_t secp256k1_strauss_scratch_size(size_t n_points) { return n_points*point_size; } -static int secp256k1_ecmult_strauss_batch(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { +static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) { secp256k1_gej* points; secp256k1_scalar* scalars; struct secp256k1_strauss_state state; size_t i; + const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch); secp256k1_gej_set_infinity(r); if (inp_g_sc == NULL && n_points == 0) { return 1; } - if (!secp256k1_scratch_allocate_frame(scratch, secp256k1_strauss_scratch_size(n_points), STRAUSS_SCRATCH_OBJECTS)) { - return 0; - } - points = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_gej)); - scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(scratch, n_points * sizeof(secp256k1_scalar)); - state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej)); - state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe)); + points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej)); + scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar)); + state.prej = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_gej)); + state.zr = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe)); #ifdef USE_ENDOMORPHISM - state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); + state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * 2 * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); state.pre_a_lam = state.pre_a + n_points * ECMULT_TABLE_SIZE(WINDOW_A); #else - state.pre_a = 
(secp256k1_ge*)secp256k1_scratch_alloc(scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); + state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge)); #endif - state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(scratch, n_points * sizeof(struct secp256k1_strauss_point_state)); + state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state)); + + if (points == NULL || scalars == NULL || state.prej == NULL || state.zr == NULL || state.pre_a == NULL) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); + return 0; + } for (i = 0; i < n_points; i++) { secp256k1_ge point; if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) { - secp256k1_scratch_deallocate_frame(scratch); + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } secp256k1_gej_set_ge(&points[i], &point); } secp256k1_ecmult_strauss_wnaf(ctx, &state, r, n_points, points, scalars, inp_g_sc); - secp256k1_scratch_deallocate_frame(scratch); + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 1; } /* Wrapper for secp256k1_ecmult_multi_func interface */ -static int secp256k1_ecmult_strauss_batch_single(const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) { - return secp256k1_ecmult_strauss_batch(actx, scratch, r, inp_g_sc, cb, cbdata, n, 0); +static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *actx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) { + return secp256k1_ecmult_strauss_batch(error_callback, actx, scratch, r, inp_g_sc, cb, cbdata, n, 0); } -static size_t secp256k1_strauss_max_points(secp256k1_scratch *scratch) { - return secp256k1_scratch_max_allocation(scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1); +static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) { + return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1); } /** Convert a number to WNAF notation. 
@@ -848,10 +985,11 @@ static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_windo size_t entries = n_points + 1; #endif size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int); - return ((1<ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps)); + state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int)); + buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, (1<<bucket_window) * sizeof(*buckets)); + if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) { + secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint); return 0; } - points = (secp256k1_ge *) secp256k1_scratch_alloc(scratch, entries * sizeof(*points)); - scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(scratch, entries * sizeof(*scalars)); - state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(scratch, sizeof(*state_space)); - state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(scratch, entries * sizeof(*state_space->ps)); - state_space->wnaf_na = (int *) secp256k1_scratch_alloc(scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int)); - buckets = (secp256k1_gej *) secp256k1_scratch_alloc(scratch, (1< max_alloc) { break; } @@ -972,12 +1116,58 @@ static size_t secp256k1_pippenger_max_points(secp256k1_scratch *scratch) { return res; } -typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t); -static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) { +/* Computes ecmult_multi by simply multiplying and adding each point. 
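(It performs one full secp256k1_ecmult per term, so it is the slowest of the strategies here.)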
Does not + * require a scratch space */ +static int secp256k1_ecmult_multi_simple_var(const secp256k1_ecmult_context *ctx, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) { + size_t point_idx; + secp256k1_scalar szero; + secp256k1_gej tmpj; + + secp256k1_scalar_set_int(&szero, 0); + secp256k1_gej_set_infinity(r); + secp256k1_gej_set_infinity(&tmpj); + /* r = inp_g_sc*G */ + secp256k1_ecmult(ctx, r, &tmpj, &szero, inp_g_sc); + for (point_idx = 0; point_idx < n_points; point_idx++) { + secp256k1_ge point; + secp256k1_gej pointj; + secp256k1_scalar scalar; + if (!cb(&scalar, &point, point_idx, cbdata)) { + return 0; + } + /* r += scalar*point */ + secp256k1_gej_set_ge(&pointj, &point); + secp256k1_ecmult(ctx, &tmpj, &pointj, &scalar, NULL); + secp256k1_gej_add_var(r, r, &tmpj, NULL); + } + return 1; +} + +/* Compute the number of batches and the batch size given the maximum batch size and the + * total number of points */ +static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) { + if (max_n_batch_points == 0) { + return 0; + } + if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) { + max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH; + } + if (n == 0) { + *n_batches = 0; + *n_batch_points = 0; + return 1; + } + /* Compute ceil(n/max_n_batch_points) and ceil(n/n_batches) */ + *n_batches = 1 + (n - 1) / max_n_batch_points; + *n_batch_points = 1 + (n - 1) / *n_batches; + return 1; +} + +typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t); +static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, const secp256k1_ecmult_context *ctx, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) { size_t i; - int (*f)(const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t); - size_t max_points; + int (*f)(const secp256k1_callback* error_callback, const secp256k1_ecmult_context*, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t); size_t n_batches; size_t n_batch_points; @@ -990,32 +1180,30 @@ static int secp256k1_ecmult_multi_var(const secp256k1_ecmult_context *ctx, secp2 secp256k1_ecmult(ctx, r, r, &szero, inp_g_sc); return 1; } - - max_points = secp256k1_pippenger_max_points(scratch); - if (max_points == 0) { - return 0; - } else if (max_points > ECMULT_MAX_POINTS_PER_BATCH) { - max_points = ECMULT_MAX_POINTS_PER_BATCH; + if (scratch == NULL) { + return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); } - n_batches = (n+max_points-1)/max_points; - n_batch_points = (n+n_batches-1)/n_batches; + /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If it's greater than + * a threshold use Pippenger's algorithm. Otherwise use Strauss' algorithm. + * As a first step check if there's enough space for Pippenger's algo (which requires less space + * than Strauss' algo) and if not, use the simple algorithm. 
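+ * For example (illustrative numbers only): with max_n_batch_points = 100 and n = 250 the + * helper yields n_batches = 1 + 249/100 = 3 and n_batch_points = 1 + 249/3 = 84, i.e. + * ceil(250/100) batches of at most ceil(250/3) points each.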
*/ + if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) { + return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); + } if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) { f = secp256k1_ecmult_pippenger_batch; } else { - max_points = secp256k1_strauss_max_points(scratch); - if (max_points == 0) { - return 0; + if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) { + return secp256k1_ecmult_multi_simple_var(ctx, r, inp_g_sc, cb, cbdata, n); } - n_batches = (n+max_points-1)/max_points; - n_batch_points = (n+n_batches-1)/n_batches; f = secp256k1_ecmult_strauss_batch; } for(i = 0; i < n_batches; i++) { size_t nbp = n < n_batch_points ? n : n_batch_points; size_t offset = n_batch_points*i; secp256k1_gej tmp; - if (!f(ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) { + if (!f(error_callback, ctx, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) { return 0; } secp256k1_gej_add_var(r, r, &tmp, NULL); diff --git a/depend/secp256k1/src/field_10x26.h b/depend/secp256k1/src/field_10x26.h index 727c5267f..5ff03c8ab 100644 --- a/depend/secp256k1/src/field_10x26.h +++ b/depend/secp256k1/src/field_10x26.h @@ -10,7 +10,9 @@ #include <stdint.h> typedef struct { - /* X = sum(i=0..9, elem[i]*2^26) mod n */ + /* X = sum(i=0..9, n[i]*2^(i*26)) mod p + * where p = 2^256 - 0x1000003D1 + */ uint32_t n[10]; #ifdef VERIFY int magnitude; diff --git a/depend/secp256k1/src/field_10x26_impl.h b/depend/secp256k1/src/field_10x26_impl.h index 94f8132fc..4ae4fdcec 100644 --- a/depend/secp256k1/src/field_10x26_impl.h +++ b/depend/secp256k1/src/field_10x26_impl.h @@ -8,7 +8,6 @@ #define SECP256K1_FIELD_REPR_IMPL_H #include "util.h" -#include "num.h" #include "field.h" #ifdef VERIFY @@ -486,7 +485,8 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint32_t *r, const uint32_t VERIFY_BITS(b[9], 26); /** [... a b c] is a shorthand for ... + a<<52 + b<<26 + c<<0 mod n. - * px is a shorthand for sum(a[i]*b[x-i], i=0..x). + * for 0 <= x <= 9, px is a shorthand for sum(a[i]*b[x-i], i=0..x). + * for 9 <= x <= 18, px is a shorthand for sum(a[i]*b[x-i], i=(x-9)..9) * Note that [x 0 0 0 0 0 0 0 0 0 0] = [x*R1 x*R0]. 
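 * (Here R1 = 0x400 and R0 = 0x3D10: together they encode 2^260 mod p = 0x1000003D10, split at the 26-bit limb boundary, so a term at position 10 folds back into the two lowest limbs.)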
*/ @@ -1069,6 +1069,7 @@ static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp2 secp256k1_fe_verify(a); secp256k1_fe_verify(b); VERIFY_CHECK(r != b); + VERIFY_CHECK(a != b); #endif secp256k1_fe_mul_inner(r->n, a->n, b->n); #ifdef VERIFY diff --git a/depend/secp256k1/src/field_5x52.h b/depend/secp256k1/src/field_5x52.h index bccd8feb4..fc5bfe357 100644 --- a/depend/secp256k1/src/field_5x52.h +++ b/depend/secp256k1/src/field_5x52.h @@ -10,7 +10,9 @@ #include <stdint.h> typedef struct { - /* X = sum(i=0..4, elem[i]*2^52) mod n */ + /* X = sum(i=0..4, n[i]*2^(i*52)) mod p + * where p = 2^256 - 0x1000003D1 + */ uint64_t n[5]; #ifdef VERIFY int magnitude; diff --git a/depend/secp256k1/src/field_5x52_impl.h b/depend/secp256k1/src/field_5x52_impl.h index 957c61b01..f4263320d 100644 --- a/depend/secp256k1/src/field_5x52_impl.h +++ b/depend/secp256k1/src/field_5x52_impl.h @@ -12,7 +12,6 @@ #endif #include "util.h" -#include "num.h" #include "field.h" #if defined(USE_ASM_X86_64) @@ -422,6 +421,7 @@ static void secp256k1_fe_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp2 secp256k1_fe_verify(a); secp256k1_fe_verify(b); VERIFY_CHECK(r != b); + VERIFY_CHECK(a != b); #endif secp256k1_fe_mul_inner(r->n, a->n, b->n); #ifdef VERIFY diff --git a/depend/secp256k1/src/field_5x52_int128_impl.h b/depend/secp256k1/src/field_5x52_int128_impl.h index 95a0d1791..bcbfb92ac 100644 --- a/depend/secp256k1/src/field_5x52_int128_impl.h +++ b/depend/secp256k1/src/field_5x52_int128_impl.h @@ -32,9 +32,11 @@ SECP256K1_INLINE static void secp256k1_fe_mul_inner(uint64_t *r, const uint64_t VERIFY_BITS(b[3], 56); VERIFY_BITS(b[4], 52); VERIFY_CHECK(r != b); + VERIFY_CHECK(a != b); /* [... a b c] is a shorthand for ... + a<<104 + b<<52 + c<<0 mod n. - * px is a shorthand for sum(a[i]*b[x-i], i=0..x). + * for 0 <= x <= 4, px is a shorthand for sum(a[i]*b[x-i], i=0..x). + * for 4 <= x <= 8, px is a shorthand for sum(a[i]*b[x-i], i=(x-4)..4) * Note that [x 0 0 0 0 0] = [x*R]. */ diff --git a/depend/secp256k1/src/field_impl.h b/depend/secp256k1/src/field_impl.h index 20428648a..6070caccf 100644 --- a/depend/secp256k1/src/field_impl.h +++ b/depend/secp256k1/src/field_impl.h @@ -12,6 +12,7 @@ #endif #include "util.h" +#include "num.h" #if defined(USE_FIELD_10X26) #include "field_10x26_impl.h" @@ -48,6 +49,8 @@ static int secp256k1_fe_sqrt(secp256k1_fe *r, const secp256k1_fe *a) { secp256k1_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; int j; + VERIFY_CHECK(r != a); + /** The binary representation of (p + 1)/4 has 3 blocks of 1s, with lengths in * { 2, 22, 223 }. 
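(The exponent being decomposed is (p + 1)/4: since p = 3 mod 4, raising a quadratic residue a to this power yields a square root of a.)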
Use an addition chain to calculate 2^n - 1 for each block: * 1, [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] diff --git a/depend/secp256k1/src/gen_context.c b/depend/secp256k1/src/gen_context.c index 87d296ebf..eb995eff9 100644 --- a/depend/secp256k1/src/gen_context.c +++ b/depend/secp256k1/src/gen_context.c @@ -8,6 +8,7 @@ #include "basic-config.h" #include "include/secp256k1.h" +#include "util.h" #include "field_impl.h" #include "scalar_impl.h" #include "group_impl.h" @@ -26,6 +27,7 @@ static const secp256k1_callback default_error_callback = { int main(int argc, char **argv) { secp256k1_ecmult_gen_context ctx; + void *prealloc, *base; int inner; int outer; FILE* fp; @@ -38,26 +40,28 @@ int main(int argc, char **argv) { fprintf(stderr, "Could not open src/ecmult_static_context.h for writing!\n"); return -1; } - + fprintf(fp, "#ifndef _SECP256K1_ECMULT_STATIC_CONTEXT_\n"); fprintf(fp, "#define _SECP256K1_ECMULT_STATIC_CONTEXT_\n"); fprintf(fp, "#include \"src/group.h\"\n"); fprintf(fp, "#define SC SECP256K1_GE_STORAGE_CONST\n"); - fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[64][16] = {\n"); + fprintf(fp, "static const secp256k1_ge_storage secp256k1_ecmult_static_context[ECMULT_GEN_PREC_N][ECMULT_GEN_PREC_G] = {\n"); + base = checked_malloc(&default_error_callback, SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE); + prealloc = base; secp256k1_ecmult_gen_context_init(&ctx); - secp256k1_ecmult_gen_context_build(&ctx, &default_error_callback); - for(outer = 0; outer != 64; outer++) { + secp256k1_ecmult_gen_context_build(&ctx, &prealloc); + for(outer = 0; outer != ECMULT_GEN_PREC_N; outer++) { fprintf(fp,"{\n"); - for(inner = 0; inner != 16; inner++) { + for(inner = 0; inner != ECMULT_GEN_PREC_G; inner++) { fprintf(fp," SC(%uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu, %uu)", SECP256K1_GE_STORAGE_CONST_GET((*ctx.prec)[outer][inner])); - if (inner != 15) { + if (inner != ECMULT_GEN_PREC_G - 1) { fprintf(fp,",\n"); } else { fprintf(fp,"\n"); } } - if (outer != 63) { + if (outer != ECMULT_GEN_PREC_N - 1) { fprintf(fp,"},\n"); } else { fprintf(fp,"}\n"); @@ -65,10 +69,11 @@ int main(int argc, char **argv) { } fprintf(fp,"};\n"); secp256k1_ecmult_gen_context_clear(&ctx); - + free(base); + fprintf(fp, "#undef SC\n"); fprintf(fp, "#endif\n"); fclose(fp); - + return 0; } diff --git a/depend/secp256k1/src/group.h b/depend/secp256k1/src/group.h index 3947ea2dd..8e122ab42 100644 --- a/depend/secp256k1/src/group.h +++ b/depend/secp256k1/src/group.h @@ -65,12 +65,7 @@ static void secp256k1_ge_neg(secp256k1_ge *r, const secp256k1_ge *a); static void secp256k1_ge_set_gej(secp256k1_ge *r, secp256k1_gej *a); /** Set a batch of group elements equal to the inputs given in jacobian coordinates */ -static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb); - -/** Set a batch of group elements equal to the inputs given in jacobian - * coordinates (with known z-ratios). zr must contain the known z-ratios such - * that mul(a[i].z, zr[i+1]) == a[i+1].z. zr[0] is ignored. */ -static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len); +static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len); /** Bring a batch of inputs given in jacobian coordinates (with known z-ratios) to * the same global z "denominator". 
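(On return every r[i] shares a single z coordinate, which is reported through *globalz.)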
zr must contain the known z-ratios such diff --git a/depend/secp256k1/src/group_impl.h b/depend/secp256k1/src/group_impl.h index b1ace87b6..9b93c39e9 100644 --- a/depend/secp256k1/src/group_impl.h +++ b/depend/secp256k1/src/group_impl.h @@ -38,22 +38,22 @@ */ #if defined(EXHAUSTIVE_TEST_ORDER) # if EXHAUSTIVE_TEST_ORDER == 199 -const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( +static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( 0xFA7CC9A7, 0x0737F2DB, 0xA749DD39, 0x2B4FB069, 0x3B017A7D, 0xA808C2F1, 0xFB12940C, 0x9EA66C18, 0x78AC123A, 0x5ED8AEF3, 0x8732BC91, 0x1F3A2868, 0x48DF246C, 0x808DAE72, 0xCFE52572, 0x7F0501ED ); -const int CURVE_B = 4; +static const int CURVE_B = 4; # elif EXHAUSTIVE_TEST_ORDER == 13 -const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( +static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( 0xedc60018, 0xa51a786b, 0x2ea91f4d, 0x4c9416c0, 0x9de54c3b, 0xa1316554, 0x6cf4345c, 0x7277ef15, 0x54cb1b6b, 0xdc8c1273, 0x087844ea, 0x43f4603e, 0x0eaf9a43, 0xf6effe55, 0x939f806d, 0x37adf8ac ); -const int CURVE_B = 2; +static const int CURVE_B = 2; # else # error No known generator for the specified exhaustive test group order. # endif @@ -68,7 +68,7 @@ static const secp256k1_ge secp256k1_ge_const_g = SECP256K1_GE_CONST( 0xFD17B448UL, 0xA6855419UL, 0x9C47D08FUL, 0xFB10D4B8UL ); -const int CURVE_B = 7; +static const int CURVE_B = 7; #endif static void secp256k1_ge_set_gej_zinv(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zi) { @@ -126,46 +126,43 @@ static void secp256k1_ge_set_gej_var(secp256k1_ge *r, secp256k1_gej *a) { r->y = a->y; } -static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len, const secp256k1_callback *cb) { - secp256k1_fe *az; - secp256k1_fe *azi; +static void secp256k1_ge_set_all_gej_var(secp256k1_ge *r, const secp256k1_gej *a, size_t len) { + secp256k1_fe u; size_t i; - size_t count = 0; - az = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * len); + size_t last_i = SIZE_MAX; + for (i = 0; i < len; i++) { if (!a[i].infinity) { - az[count++] = a[i].z; + /* Use destination's x coordinates as scratch space */ + if (last_i == SIZE_MAX) { + r[i].x = a[i].z; + } else { + secp256k1_fe_mul(&r[i].x, &r[last_i].x, &a[i].z); + } + last_i = i; } } + if (last_i == SIZE_MAX) { + return; + } + secp256k1_fe_inv_var(&u, &r[last_i].x); - azi = (secp256k1_fe *)checked_malloc(cb, sizeof(secp256k1_fe) * count); - secp256k1_fe_inv_all_var(azi, az, count); - free(az); - - count = 0; - for (i = 0; i < len; i++) { - r[i].infinity = a[i].infinity; + i = last_i; + while (i > 0) { + i--; if (!a[i].infinity) { - secp256k1_ge_set_gej_zinv(&r[i], &a[i], &azi[count++]); + secp256k1_fe_mul(&r[last_i].x, &r[i].x, &u); + secp256k1_fe_mul(&u, &u, &a[last_i].z); + last_i = i; } } - free(azi); -} + VERIFY_CHECK(!a[last_i].infinity); + r[last_i].x = u; -static void secp256k1_ge_set_table_gej_var(secp256k1_ge *r, const secp256k1_gej *a, const secp256k1_fe *zr, size_t len) { - size_t i = len - 1; - secp256k1_fe zi; - - if (len > 0) { - /* Compute the inverse of the last z coordinate, and use it to compute the last affine output. */ - secp256k1_fe_inv(&zi, &a[i].z); - secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi); - - /* Work out way backwards, using the z-ratios to scale the x/y values. 
*/ - while (i > 0) { - secp256k1_fe_mul(&zi, &zi, &zr[i]); - i--; - secp256k1_ge_set_gej_zinv(&r[i], &a[i], &zi); + for (i = 0; i < len; i++) { + r[i].infinity = a[i].infinity; + if (!a[i].infinity) { + secp256k1_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); } } } @@ -178,6 +175,8 @@ static void secp256k1_ge_globalz_set_table_gej(size_t len, secp256k1_ge *r, secp /* The z of the final point gives us the "global Z" for the table. */ r[i].x = a[i].x; r[i].y = a[i].y; + /* Ensure all y values are in weak normal form for fast negation of points */ + secp256k1_fe_normalize_weak(&r[i].y); *globalz = a[i].z; r[i].infinity = 0; zs = zr[i]; diff --git a/depend/secp256k1/src/hash_impl.h b/depend/secp256k1/src/hash_impl.h index 009f26beb..782f97216 100644 --- a/depend/secp256k1/src/hash_impl.h +++ b/depend/secp256k1/src/hash_impl.h @@ -131,7 +131,8 @@ static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) { static void secp256k1_sha256_write(secp256k1_sha256 *hash, const unsigned char *data, size_t len) { size_t bufsize = hash->bytes & 0x3F; hash->bytes += len; - while (bufsize + len >= 64) { + VERIFY_CHECK(hash->bytes >= len); + while (len >= 64 - bufsize) { /* Fill the buffer, and process it. */ size_t chunk_len = 64 - bufsize; memcpy(((unsigned char*)hash->buf) + bufsize, data, chunk_len); diff --git a/depend/secp256k1/src/scalar_4x64_impl.h b/depend/secp256k1/src/scalar_4x64_impl.h index db1ebf94b..d378335d9 100644 --- a/depend/secp256k1/src/scalar_4x64_impl.h +++ b/depend/secp256k1/src/scalar_4x64_impl.h @@ -376,7 +376,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) /* extract m6 */ "movq %%r8, %q6\n" : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6) - : "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1) + : "S"(l), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1) : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc"); /* Reduce 385 bits into 258. */ @@ -455,7 +455,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) /* extract p4 */ "movq %%r9, %q4\n" : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4) - : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1) + : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1) : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc"); /* Reduce 258 bits into 256. 
*/ @@ -501,7 +501,7 @@ static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) /* Extract c */ "movq %%r9, %q0\n" : "=g"(c) - : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1) + : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "i"(SECP256K1_N_C_0), "i"(SECP256K1_N_C_1) : "rax", "rdx", "r8", "r9", "r10", "cc", "memory"); #else uint128_t c; diff --git a/depend/secp256k1/src/scratch.h b/depend/secp256k1/src/scratch.h index fef377af0..77b35d126 100644 --- a/depend/secp256k1/src/scratch.h +++ b/depend/secp256k1/src/scratch.h @@ -7,33 +7,36 @@ #ifndef _SECP256K1_SCRATCH_ #define _SECP256K1_SCRATCH_ -#define SECP256K1_SCRATCH_MAX_FRAMES 5 - /* The typedef is used internally; the struct name is used in the public API * (where it is exposed as a different typedef) */ typedef struct secp256k1_scratch_space_struct { - void *data[SECP256K1_SCRATCH_MAX_FRAMES]; - size_t offset[SECP256K1_SCRATCH_MAX_FRAMES]; - size_t frame_size[SECP256K1_SCRATCH_MAX_FRAMES]; - size_t frame; + /** guard against interpreting this object as other types */ + unsigned char magic[8]; + /** actual allocated data */ + void *data; + /** amount that has been allocated (i.e. `data + offset` is the next + * available pointer) */ + size_t alloc_size; + /** maximum size available to allocate */ size_t max_size; - const secp256k1_callback* error_callback; } secp256k1_scratch; static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t max_size); -static void secp256k1_scratch_destroy(secp256k1_scratch* scratch); +static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch); -/** Attempts to allocate a new stack frame with `n` available bytes. Returns 1 on success, 0 on failure */ -static int secp256k1_scratch_allocate_frame(secp256k1_scratch* scratch, size_t n, size_t objects); +/** Returns an opaque object used to "checkpoint" a scratch space. Used + * with `secp256k1_scratch_apply_checkpoint` to undo allocations. */ +static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch); -/** Deallocates a stack frame */ -static void secp256k1_scratch_deallocate_frame(secp256k1_scratch* scratch); +/** Applies a check point received from `secp256k1_scratch_checkpoint`, + * undoing all allocations since that point. 
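The checkpoint pair declared here replaces the old fixed-depth frame stack with a single linear allocator: callers snapshot the current allocation offset, allocate freely, and later rewind to the snapshot. A minimal usage sketch against these declarations (`cb` and `scratch` are assumed to already exist; this is not code from the patch):

```c
/* Sketch: rewindable scratch allocation with the checkpoint API. */
size_t cp = secp256k1_scratch_checkpoint(cb, scratch);
unsigned char *buf = (unsigned char *)secp256k1_scratch_alloc(cb, scratch, 128);
if (buf != NULL) {
    /* ... use the 128 temporary bytes ... */
}
/* Undo everything allocated since `cp` in one step; the internal
 * alloc_size drops back to the checkpointed offset. */
secp256k1_scratch_apply_checkpoint(cb, scratch, cp);
```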
*/ +static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint); /** Returns the maximum allocation the scratch space will allow */ -static size_t secp256k1_scratch_max_allocation(const secp256k1_scratch* scratch, size_t n_objects); +static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t n_objects); /** Returns a pointer into the most recently allocated frame, or NULL if there is insufficient available space */ -static void *secp256k1_scratch_alloc(secp256k1_scratch* scratch, size_t n); +static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t n); #endif diff --git a/depend/secp256k1/src/scratch_impl.h b/depend/secp256k1/src/scratch_impl.h index abed713b2..4cee70000 100644 --- a/depend/secp256k1/src/scratch_impl.h +++ b/depend/secp256k1/src/scratch_impl.h @@ -7,78 +7,80 @@ #ifndef _SECP256K1_SCRATCH_IMPL_H_ #define _SECP256K1_SCRATCH_IMPL_H_ +#include "util.h" #include "scratch.h" -/* Using 16 bytes alignment because common architectures never have alignment - * requirements above 8 for any of the types we care about. In addition we - * leave some room because currently we don't care about a few bytes. - * TODO: Determine this at configure time. */ -#define ALIGNMENT 16 - -static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t max_size) { - secp256k1_scratch* ret = (secp256k1_scratch*)checked_malloc(error_callback, sizeof(*ret)); +static secp256k1_scratch* secp256k1_scratch_create(const secp256k1_callback* error_callback, size_t size) { + const size_t base_alloc = ((sizeof(secp256k1_scratch) + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT; + void *alloc = checked_malloc(error_callback, base_alloc + size); + secp256k1_scratch* ret = (secp256k1_scratch *)alloc; if (ret != NULL) { memset(ret, 0, sizeof(*ret)); - ret->max_size = max_size; - ret->error_callback = error_callback; + memcpy(ret->magic, "scratch", 8); + ret->data = (void *) ((char *) alloc + base_alloc); + ret->max_size = size; } return ret; } -static void secp256k1_scratch_destroy(secp256k1_scratch* scratch) { +static void secp256k1_scratch_destroy(const secp256k1_callback* error_callback, secp256k1_scratch* scratch) { if (scratch != NULL) { - VERIFY_CHECK(scratch->frame == 0); + VERIFY_CHECK(scratch->alloc_size == 0); /* all checkpoints should be applied */ + if (memcmp(scratch->magic, "scratch", 8) != 0) { + secp256k1_callback_call(error_callback, "invalid scratch space"); + return; + } + memset(scratch->magic, 0, sizeof(scratch->magic)); free(scratch); } } -static size_t secp256k1_scratch_max_allocation(const secp256k1_scratch* scratch, size_t objects) { - size_t i = 0; - size_t allocated = 0; - for (i = 0; i < scratch->frame; i++) { - allocated += scratch->frame_size[i]; - } - if (scratch->max_size - allocated <= objects * ALIGNMENT) { +static size_t secp256k1_scratch_checkpoint(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch) { + if (memcmp(scratch->magic, "scratch", 8) != 0) { + secp256k1_callback_call(error_callback, "invalid scratch space"); return 0; } - return scratch->max_size - allocated - objects * ALIGNMENT; + return scratch->alloc_size; } -static int secp256k1_scratch_allocate_frame(secp256k1_scratch* scratch, size_t n, size_t objects) { - VERIFY_CHECK(scratch->frame < SECP256K1_SCRATCH_MAX_FRAMES); - - if (n <= 
secp256k1_scratch_max_allocation(scratch, objects)) { - n += objects * ALIGNMENT; - scratch->data[scratch->frame] = checked_malloc(scratch->error_callback, n); - if (scratch->data[scratch->frame] == NULL) { - return 0; - } - scratch->frame_size[scratch->frame] = n; - scratch->offset[scratch->frame] = 0; - scratch->frame++; - return 1; - } else { - return 0; +static void secp256k1_scratch_apply_checkpoint(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t checkpoint) { + if (memcmp(scratch->magic, "scratch", 8) != 0) { + secp256k1_callback_call(error_callback, "invalid scratch space"); + return; + } + if (checkpoint > scratch->alloc_size) { + secp256k1_callback_call(error_callback, "invalid checkpoint"); + return; } + scratch->alloc_size = checkpoint; } -static void secp256k1_scratch_deallocate_frame(secp256k1_scratch* scratch) { - VERIFY_CHECK(scratch->frame > 0); - scratch->frame -= 1; - free(scratch->data[scratch->frame]); +static size_t secp256k1_scratch_max_allocation(const secp256k1_callback* error_callback, const secp256k1_scratch* scratch, size_t objects) { + if (memcmp(scratch->magic, "scratch", 8) != 0) { + secp256k1_callback_call(error_callback, "invalid scratch space"); + return 0; + } + if (scratch->max_size - scratch->alloc_size <= objects * (ALIGNMENT - 1)) { + return 0; + } + return scratch->max_size - scratch->alloc_size - objects * (ALIGNMENT - 1); } -static void *secp256k1_scratch_alloc(secp256k1_scratch* scratch, size_t size) { +static void *secp256k1_scratch_alloc(const secp256k1_callback* error_callback, secp256k1_scratch* scratch, size_t size) { void *ret; - size_t frame = scratch->frame - 1; - size = ((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT; + size = ROUND_TO_ALIGN(size); + + if (memcmp(scratch->magic, "scratch", 8) != 0) { + secp256k1_callback_call(error_callback, "invalid scratch space"); + return NULL; + } - if (scratch->frame == 0 || size + scratch->offset[frame] > scratch->frame_size[frame]) { + if (size > scratch->max_size - scratch->alloc_size) { return NULL; } - ret = (void *) ((unsigned char *) scratch->data[frame] + scratch->offset[frame]); + ret = (void *) ((char *) scratch->data + scratch->alloc_size); memset(ret, 0, size); - scratch->offset[frame] += size; + scratch->alloc_size += size; return ret; } diff --git a/depend/secp256k1/src/secp256k1.c b/depend/secp256k1/src/secp256k1.c index a1e390807..6954e1bcf 100644 --- a/depend/secp256k1/src/secp256k1.c +++ b/depend/secp256k1/src/secp256k1.c @@ -5,6 +5,7 @@ **********************************************************************/ #include "include/secp256k1.h" +#include "include/secp256k1_preallocated.h" #include "util.h" #include "num_impl.h" @@ -26,28 +27,39 @@ } \ } while(0) -static void default_illegal_callback_fn(const char* str, void* data) { +#define ARG_CHECK_NO_RETURN(cond) do { \ + if (EXPECT(!(cond), 0)) { \ + secp256k1_callback_call(&ctx->illegal_callback, #cond); \ + } \ +} while(0) + +#ifndef USE_EXTERNAL_DEFAULT_CALLBACKS +#include +#include +static void secp256k1_default_illegal_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str); abort(); } - -static const secp256k1_callback default_illegal_callback = { - default_illegal_callback_fn, - NULL -}; - -static void default_error_callback_fn(const char* str, void* data) { +static void secp256k1_default_error_callback_fn(const char* str, void* data) { (void)data; fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str); abort(); } 
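When USE_EXTERNAL_DEFAULT_CALLBACKS is defined, the #else branch just below turns the two default handlers into plain extern declarations, and the embedder must supply matching definitions at link time; the Rust overrides later in this patch do exactly that. A hedged C sketch of what a non-Rust embedder could provide:

```c
/* Example definitions an embedder might supply when the library is built
 * with -DUSE_EXTERNAL_DEFAULT_CALLBACKS; the symbol names must match the
 * extern declarations exactly. The bodies here are illustrative. */
#include <stdio.h>
#include <stdlib.h>

void secp256k1_default_illegal_callback_fn(const char* str, void* data) {
    (void)data;
    fprintf(stderr, "[embedder] illegal argument: %s\n", str);
    abort();
}

void secp256k1_default_error_callback_fn(const char* str, void* data) {
    (void)data;
    fprintf(stderr, "[embedder] internal error: %s\n", str);
    abort();
}
```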
+#else +void secp256k1_default_illegal_callback_fn(const char* str, void* data); +void secp256k1_default_error_callback_fn(const char* str, void* data); +#endif -static const secp256k1_callback default_error_callback = { - default_error_callback_fn, +static const secp256k1_callback default_illegal_callback = { + secp256k1_default_illegal_callback_fn, NULL }; +static const secp256k1_callback default_error_callback = { + secp256k1_default_error_callback_fn, + NULL +}; struct secp256k1_context_struct { secp256k1_ecmult_context ecmult_ctx; @@ -59,20 +71,55 @@ struct secp256k1_context_struct { static const secp256k1_context secp256k1_context_no_precomp_ = { { 0 }, { 0 }, - { default_illegal_callback_fn, 0 }, - { default_error_callback_fn, 0 } + { secp256k1_default_illegal_callback_fn, 0 }, + { secp256k1_default_error_callback_fn, 0 } }; const secp256k1_context *secp256k1_context_no_precomp = &secp256k1_context_no_precomp_; -secp256k1_context* secp256k1_context_create(unsigned int flags) { - secp256k1_context* ret = (secp256k1_context*)checked_malloc(&default_error_callback, sizeof(secp256k1_context)); +size_t secp256k1_context_preallocated_size(unsigned int flags) { + size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context)); + + if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { + secp256k1_callback_call(&default_illegal_callback, + "Invalid flags"); + return 0; + } + + if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) { + ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; + } + if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) { + ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; + } + return ret; +} + +size_t secp256k1_context_preallocated_clone_size(const secp256k1_context* ctx) { + size_t ret = ROUND_TO_ALIGN(sizeof(secp256k1_context)); + VERIFY_CHECK(ctx != NULL); + if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + ret += SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; + } + if (secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx)) { + ret += SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; + } + return ret; +} + +secp256k1_context* secp256k1_context_preallocated_create(void* prealloc, unsigned int flags) { + void* const base = prealloc; + size_t prealloc_size; + secp256k1_context* ret; + + VERIFY_CHECK(prealloc != NULL); + prealloc_size = secp256k1_context_preallocated_size(flags); + ret = (secp256k1_context*)manual_alloc(&prealloc, sizeof(secp256k1_context), base, prealloc_size); ret->illegal_callback = default_illegal_callback; ret->error_callback = default_error_callback; if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) { secp256k1_callback_call(&ret->illegal_callback, "Invalid flags"); - free(ret); return NULL; } @@ -80,47 +127,79 @@ secp256k1_context* secp256k1_context_create(unsigned int flags) { secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx); if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) { - secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &ret->error_callback); + secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &prealloc); } if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) { - secp256k1_ecmult_context_build(&ret->ecmult_ctx, &ret->error_callback); + secp256k1_ecmult_context_build(&ret->ecmult_ctx, &prealloc); } + return (secp256k1_context*) ret; +} + +secp256k1_context* secp256k1_context_create(unsigned int flags) { + size_t const prealloc_size = secp256k1_context_preallocated_size(flags); + secp256k1_context* ctx = (secp256k1_context*)checked_malloc(&default_error_callback, 
prealloc_size); + if (EXPECT(secp256k1_context_preallocated_create(ctx, flags) == NULL, 0)) { + free(ctx); + return NULL; + } + + return ctx; +} + +secp256k1_context* secp256k1_context_preallocated_clone(const secp256k1_context* ctx, void* prealloc) { + size_t prealloc_size; + secp256k1_context* ret; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(prealloc != NULL); + + prealloc_size = secp256k1_context_preallocated_clone_size(ctx); + ret = (secp256k1_context*)prealloc; + memcpy(ret, ctx, prealloc_size); + secp256k1_ecmult_gen_context_finalize_memcpy(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx); + secp256k1_ecmult_context_finalize_memcpy(&ret->ecmult_ctx, &ctx->ecmult_ctx); return ret; } secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) { - secp256k1_context* ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, sizeof(secp256k1_context)); - ret->illegal_callback = ctx->illegal_callback; - ret->error_callback = ctx->error_callback; - secp256k1_ecmult_context_clone(&ret->ecmult_ctx, &ctx->ecmult_ctx, &ctx->error_callback); - secp256k1_ecmult_gen_context_clone(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx, &ctx->error_callback); + secp256k1_context* ret; + size_t prealloc_size; + + VERIFY_CHECK(ctx != NULL); + prealloc_size = secp256k1_context_preallocated_clone_size(ctx); + ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, prealloc_size); + ret = secp256k1_context_preallocated_clone(ctx, ret); return ret; } -void secp256k1_context_destroy(secp256k1_context* ctx) { - CHECK(ctx != secp256k1_context_no_precomp); +void secp256k1_context_preallocated_destroy(secp256k1_context* ctx) { + ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp); if (ctx != NULL) { secp256k1_ecmult_context_clear(&ctx->ecmult_ctx); secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx); + } +} +void secp256k1_context_destroy(secp256k1_context* ctx) { + if (ctx != NULL) { + secp256k1_context_preallocated_destroy(ctx); free(ctx); } } void secp256k1_context_set_illegal_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - CHECK(ctx != secp256k1_context_no_precomp); + ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp); if (fun == NULL) { - fun = default_illegal_callback_fn; + fun = secp256k1_default_illegal_callback_fn; } ctx->illegal_callback.fn = fun; ctx->illegal_callback.data = data; } void secp256k1_context_set_error_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) { - CHECK(ctx != secp256k1_context_no_precomp); + ARG_CHECK_NO_RETURN(ctx != secp256k1_context_no_precomp); if (fun == NULL) { - fun = default_error_callback_fn; + fun = secp256k1_default_error_callback_fn; } ctx->error_callback.fn = fun; ctx->error_callback.data = data; @@ -131,8 +210,9 @@ secp256k1_scratch_space* secp256k1_scratch_space_create(const secp256k1_context* return secp256k1_scratch_create(&ctx->error_callback, max_size); } -void secp256k1_scratch_space_destroy(secp256k1_scratch_space* scratch) { - secp256k1_scratch_destroy(scratch); +void secp256k1_scratch_space_destroy(const secp256k1_context *ctx, secp256k1_scratch_space* scratch) { + VERIFY_CHECK(ctx != NULL); + secp256k1_scratch_destroy(&ctx->error_callback, scratch); } static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) { @@ -457,6 +537,7 @@ int secp256k1_ec_privkey_negate(const secp256k1_context* ctx, unsigned char *sec secp256k1_scalar_negate(&sec, &sec); secp256k1_scalar_get_b32(seckey, 
&sec); + secp256k1_scalar_clear(&sec); return 1; } @@ -570,9 +651,9 @@ int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey int secp256k1_context_randomize(secp256k1_context* ctx, const unsigned char *seed32) { VERIFY_CHECK(ctx != NULL); - CHECK(ctx != secp256k1_context_no_precomp); - ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + if (secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)) { + secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32); + } return 1; } diff --git a/depend/secp256k1/src/tests.c b/depend/secp256k1/src/tests.c index c72a742d8..1d813f1ad 100644 --- a/depend/secp256k1/src/tests.c +++ b/depend/secp256k1/src/tests.c @@ -16,6 +16,7 @@ #include "secp256k1.c" #include "include/secp256k1.h" +#include "include/secp256k1_preallocated.h" #include "testrand_impl.h" #ifdef ENABLE_OPENSSL_TESTS @@ -137,23 +138,47 @@ void random_scalar_order(secp256k1_scalar *num) { } while(1); } -void run_context_tests(void) { +void run_context_tests(int use_prealloc) { secp256k1_pubkey pubkey; secp256k1_pubkey zero_pubkey; secp256k1_ecdsa_signature sig; unsigned char ctmp[32]; int32_t ecount; int32_t ecount2; - secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); - secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); - secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); - secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + secp256k1_context *none; + secp256k1_context *sign; + secp256k1_context *vrfy; + secp256k1_context *both; + void *none_prealloc = NULL; + void *sign_prealloc = NULL; + void *vrfy_prealloc = NULL; + void *both_prealloc = NULL; secp256k1_gej pubj; secp256k1_ge pub; secp256k1_scalar msg, key, nonce; secp256k1_scalar sigr, sigs; + if (use_prealloc) { + none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); + vrfy_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); + both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); + CHECK(none_prealloc != NULL); + CHECK(sign_prealloc != NULL); + CHECK(vrfy_prealloc != NULL); + CHECK(both_prealloc != NULL); + none = secp256k1_context_preallocated_create(none_prealloc, SECP256K1_CONTEXT_NONE); + sign = secp256k1_context_preallocated_create(sign_prealloc, SECP256K1_CONTEXT_SIGN); + vrfy = secp256k1_context_preallocated_create(vrfy_prealloc, SECP256K1_CONTEXT_VERIFY); + both = secp256k1_context_preallocated_create(both_prealloc, SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + } else { + none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); + sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN); + vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY); + both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + } + memset(&zero_pubkey, 0, sizeof(zero_pubkey)); ecount = 0; @@ -163,14 +188,57 @@ void run_context_tests(void) { secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, NULL); CHECK(vrfy->error_callback.fn != sign->error_callback.fn); + /* check if sizes for cloning are consistent */ + CHECK(secp256k1_context_preallocated_clone_size(none) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); + 
CHECK(secp256k1_context_preallocated_clone_size(sign) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); + CHECK(secp256k1_context_preallocated_clone_size(vrfy) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); + CHECK(secp256k1_context_preallocated_clone_size(both) == secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); + /*** clone and destroy all of them to make sure cloning was complete ***/ { secp256k1_context *ctx_tmp; - ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_destroy(ctx_tmp); - ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_destroy(ctx_tmp); + if (use_prealloc) { + /* clone into a non-preallocated context and then again into a new preallocated one. */ + ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp); + free(none_prealloc); none_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(none_prealloc != NULL); + ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, none_prealloc); secp256k1_context_destroy(ctx_tmp); + + ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp); + free(sign_prealloc); sign_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(sign_prealloc != NULL); + ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, sign_prealloc); secp256k1_context_destroy(ctx_tmp); + + ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp); + free(vrfy_prealloc); vrfy_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(vrfy_prealloc != NULL); + ctx_tmp = vrfy; vrfy = secp256k1_context_preallocated_clone(vrfy, vrfy_prealloc); secp256k1_context_destroy(ctx_tmp); + + ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp); + free(both_prealloc); both_prealloc = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(both_prealloc != NULL); + ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, both_prealloc); secp256k1_context_destroy(ctx_tmp); + } else { + /* clone into a preallocated context and then again into a new non-preallocated one. 
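These round-trips exercise cloning between heap-backed and caller-allocated contexts. Stripped of the test choreography, the preallocated API boils down to a size query, placement creation, and a destroy that deliberately does not free; a minimal sketch (error handling elided):

```c
#include <stdlib.h>
#include <secp256k1.h>
#include <secp256k1_preallocated.h>

size_t sz = secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN);
void *mem = malloc(sz);  /* any sufficiently large, suitably aligned block */
secp256k1_context *ctx = secp256k1_context_preallocated_create(mem, SECP256K1_CONTEXT_SIGN);
/* ... use ctx like any other context ... */
secp256k1_context_preallocated_destroy(ctx); /* clears secrets, frees nothing */
free(mem);                                   /* the caller owns the memory */
```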
*/ + void *prealloc_tmp; + + prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_NONE)); CHECK(prealloc_tmp != NULL); + ctx_tmp = none; none = secp256k1_context_preallocated_clone(none, prealloc_tmp); secp256k1_context_destroy(ctx_tmp); + ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_preallocated_destroy(ctx_tmp); + free(prealloc_tmp); + + prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN)); CHECK(prealloc_tmp != NULL); + ctx_tmp = sign; sign = secp256k1_context_preallocated_clone(sign, prealloc_tmp); secp256k1_context_destroy(ctx_tmp); + ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_preallocated_destroy(ctx_tmp); + free(prealloc_tmp); + + prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); + ctx_tmp = vrfy; vrfy = secp256k1_context_preallocated_clone(vrfy, prealloc_tmp); secp256k1_context_destroy(ctx_tmp); + ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_preallocated_destroy(ctx_tmp); + free(prealloc_tmp); + + prealloc_tmp = malloc(secp256k1_context_preallocated_size(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY)); CHECK(prealloc_tmp != NULL); + ctx_tmp = both; both = secp256k1_context_preallocated_clone(both, prealloc_tmp); secp256k1_context_destroy(ctx_tmp); + ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_preallocated_destroy(ctx_tmp); + free(prealloc_tmp); + } } /* Verify that the error callback makes it across the clone. */ @@ -218,17 +286,17 @@ void run_context_tests(void) { CHECK(ecount == 3); CHECK(secp256k1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1); CHECK(ecount == 3); - CHECK(secp256k1_context_randomize(vrfy, ctmp) == 0); - CHECK(ecount == 4); + CHECK(secp256k1_context_randomize(vrfy, ctmp) == 1); + CHECK(ecount == 3); + CHECK(secp256k1_context_randomize(vrfy, NULL) == 1); + CHECK(ecount == 3); + CHECK(secp256k1_context_randomize(sign, ctmp) == 1); + CHECK(ecount2 == 14); CHECK(secp256k1_context_randomize(sign, NULL) == 1); CHECK(ecount2 == 14); secp256k1_context_set_illegal_callback(vrfy, NULL, NULL); secp256k1_context_set_illegal_callback(sign, NULL, NULL); - /* This shouldn't leak memory, due to already-set tests. */ - secp256k1_ecmult_gen_context_build(&sign->ecmult_gen_ctx, NULL); - secp256k1_ecmult_context_build(&vrfy->ecmult_ctx, NULL); - /* obtain a working nonce */ do { random_scalar_order_test(&nonce); @@ -243,49 +311,95 @@ void run_context_tests(void) { CHECK(secp256k1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg)); /* cleanup */ - secp256k1_context_destroy(none); - secp256k1_context_destroy(sign); - secp256k1_context_destroy(vrfy); - secp256k1_context_destroy(both); + if (use_prealloc) { + secp256k1_context_preallocated_destroy(none); + secp256k1_context_preallocated_destroy(sign); + secp256k1_context_preallocated_destroy(vrfy); + secp256k1_context_preallocated_destroy(both); + free(none_prealloc); + free(sign_prealloc); + free(vrfy_prealloc); + free(both_prealloc); + } else { + secp256k1_context_destroy(none); + secp256k1_context_destroy(sign); + secp256k1_context_destroy(vrfy); + secp256k1_context_destroy(both); + } /* Defined as no-op. 
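The flipped expectations for vrfy above match the secp256k1_context_randomize change earlier in this diff: on a context without a signing (ecmult_gen) component there is nothing to blind, so re-randomization is now a successful no-op rather than an illegal-argument failure. A sketch of the new contract (illustrative, not from the patch):

```c
#include <assert.h>
#include <secp256k1.h>

void randomize_is_a_noop_on_verify_only(const unsigned char *seed32) {
    secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
    /* Previously this hit the illegal-argument callback and returned 0;
     * it now returns 1 without touching any state. */
    assert(secp256k1_context_randomize(vrfy, seed32) == 1);
    secp256k1_context_destroy(vrfy);
}
```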
*/ secp256k1_context_destroy(NULL); + secp256k1_context_preallocated_destroy(NULL); + } void run_scratch_tests(void) { + const size_t adj_alloc = ((500 + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT; + int32_t ecount = 0; + size_t checkpoint; + size_t checkpoint_2; secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE); secp256k1_scratch_space *scratch; + secp256k1_scratch_space local_scratch; /* Test public API */ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount); scratch = secp256k1_scratch_space_create(none, 1000); CHECK(scratch != NULL); CHECK(ecount == 0); /* Test internal API */ - CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000); - CHECK(secp256k1_scratch_max_allocation(scratch, 1) < 1000); - - /* Allocating 500 bytes with no frame fails */ - CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL); - CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000); - - /* ...but pushing a new stack frame does affect the max allocation */ - CHECK(secp256k1_scratch_allocate_frame(scratch, 500, 1 == 1)); - CHECK(secp256k1_scratch_max_allocation(scratch, 1) < 500); /* 500 - ALIGNMENT */ - CHECK(secp256k1_scratch_alloc(scratch, 500) != NULL); - CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL); - - CHECK(secp256k1_scratch_allocate_frame(scratch, 500, 1) == 0); + CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); + CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - (ALIGNMENT - 1)); + CHECK(scratch->alloc_size == 0); + CHECK(scratch->alloc_size % ALIGNMENT == 0); + + /* Allocating 500 bytes succeeds */ + checkpoint = secp256k1_scratch_checkpoint(&none->error_callback, scratch); + CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL); + CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + CHECK(scratch->alloc_size != 0); + CHECK(scratch->alloc_size % ALIGNMENT == 0); + + /* Allocating another 500 bytes fails */ + CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL); + CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000 - adj_alloc); + CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 1) == 1000 - adj_alloc - (ALIGNMENT - 1)); + CHECK(scratch->alloc_size != 0); + CHECK(scratch->alloc_size % ALIGNMENT == 0); + + /* ...but it succeeds once we apply the checkpoint to undo it */ + secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); + CHECK(scratch->alloc_size == 0); + CHECK(secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0) == 1000); + CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) != NULL); + CHECK(scratch->alloc_size != 0); + + /* try to apply a bad checkpoint */ + checkpoint_2 = secp256k1_scratch_checkpoint(&none->error_callback, scratch); + secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint); + CHECK(ecount == 0); + secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, checkpoint_2); /* checkpoint_2 is after checkpoint */ + CHECK(ecount == 1); + secp256k1_scratch_apply_checkpoint(&none->error_callback, scratch, (size_t) -1); /* this is just wildly invalid */ + CHECK(ecount == 2); - /* ...and this effect is undone by popping the frame 
*/ - secp256k1_scratch_deallocate_frame(scratch); - CHECK(secp256k1_scratch_max_allocation(scratch, 0) == 1000); - CHECK(secp256k1_scratch_alloc(scratch, 500) == NULL); + /* try to use badly initialized scratch space */ + secp256k1_scratch_space_destroy(none, scratch); + memset(&local_scratch, 0, sizeof(local_scratch)); + scratch = &local_scratch; + CHECK(!secp256k1_scratch_max_allocation(&none->error_callback, scratch, 0)); + CHECK(ecount == 3); + CHECK(secp256k1_scratch_alloc(&none->error_callback, scratch, 500) == NULL); + CHECK(ecount == 4); + secp256k1_scratch_space_destroy(none, scratch); + CHECK(ecount == 5); /* cleanup */ - secp256k1_scratch_space_destroy(scratch); + secp256k1_scratch_space_destroy(none, NULL); /* no-op */ secp256k1_context_destroy(none); } @@ -2095,7 +2209,6 @@ void test_ge(void) { /* Test batch gej -> ge conversion with and without known z ratios. */ { secp256k1_fe *zr = (secp256k1_fe *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_fe)); - secp256k1_ge *ge_set_table = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge)); secp256k1_ge *ge_set_all = (secp256k1_ge *)checked_malloc(&ctx->error_callback, (4 * runs + 1) * sizeof(secp256k1_ge)); for (i = 0; i < 4 * runs + 1; i++) { /* Compute gej[i + 1].z / gez[i].z (with gej[n].z taken to be 1). */ @@ -2103,20 +2216,33 @@ void test_ge(void) { secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z); } } - secp256k1_ge_set_table_gej_var(ge_set_table, gej, zr, 4 * runs + 1); - secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1, &ctx->error_callback); + secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1); for (i = 0; i < 4 * runs + 1; i++) { secp256k1_fe s; random_fe_non_zero(&s); secp256k1_gej_rescale(&gej[i], &s); - ge_equals_gej(&ge_set_table[i], &gej[i]); ge_equals_gej(&ge_set_all[i], &gej[i]); } - free(ge_set_table); free(ge_set_all); free(zr); } + /* Test batch gej -> ge conversion with many infinities. 
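The reworked secp256k1_ge_set_all_gej_var (and the infinity-heavy test that follows) rely on Montgomery's batch-inversion trick: accumulate running products of the z coordinates, invert the final product once, then walk backwards trading the single inverse for per-element inverses. A standalone sketch of the trick in its simple prefix-product form (the real code additionally reuses the output x coordinates as scratch and skips points at infinity):

```c
/* Invert n (>= 1) non-zero field elements using a single field inversion.
 * `prod` is caller-provided scratch of length n. */
static void batch_inverse_sketch(secp256k1_fe *r, const secp256k1_fe *a, secp256k1_fe *prod, size_t n) {
    secp256k1_fe u;
    size_t i;
    prod[0] = a[0];
    for (i = 1; i < n; i++) {
        secp256k1_fe_mul(&prod[i], &prod[i - 1], &a[i]);  /* prod[i] = a[0]*...*a[i] */
    }
    secp256k1_fe_inv_var(&u, &prod[n - 1]);               /* u = 1/(a[0]*...*a[n-1]) */
    for (i = n - 1; i > 0; i--) {
        secp256k1_fe_mul(&r[i], &u, &prod[i - 1]);        /* r[i] = 1/a[i] */
        secp256k1_fe_mul(&u, &u, &a[i]);                  /* strip a[i] from u */
    }
    r[0] = u;
}
```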
*/ + for (i = 0; i < 4 * runs + 1; i++) { + random_group_element_test(&ge[i]); + /* randomly set half the points to infinity */ + if (secp256k1_fe_is_odd(&ge[i].x)) { + secp256k1_ge_set_infinity(&ge[i]); + } + secp256k1_gej_set_ge(&gej[i], &ge[i]); + } + /* batch invert */ + secp256k1_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + /* check result */ + for (i = 0; i < 4 * runs + 1; i++) { + ge_equals_gej(&ge[i], &gej[i]); + } + free(ge); free(gej); free(zinv); @@ -2556,14 +2682,13 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e secp256k1_gej r; secp256k1_gej r2; ecmult_multi_data data; - secp256k1_scratch *scratch_empty; data.sc = sc; data.pt = pt; secp256k1_scalar_set_int(&szero, 0); /* No points to multiply */ - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, NULL, ecmult_multi_callback, &data, 0)); /* Check 1- and 2-point multiplies against ecmult */ for (ncount = 0; ncount < count; ncount++) { @@ -2579,36 +2704,31 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e /* only G scalar */ secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &szero, &sc[0]); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[0], ecmult_multi_callback, &data, 0)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); /* 1-point */ secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &szero); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 1)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); - /* Try to multiply 1 point, but scratch space is empty */ - scratch_empty = secp256k1_scratch_create(&ctx->error_callback, 0); - CHECK(!ecmult_multi(&ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1)); - secp256k1_scratch_destroy(scratch_empty); - /* Try to multiply 1 point, but callback returns false */ - CHECK(!ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1)); + CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_false_callback, &data, 1)); /* 2-point */ secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 2)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); /* 2-point with G scalar */ secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &ptgj, &sc[0], &sc[1]); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &sc[1], ecmult_multi_callback, &data, 1)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); @@ -2625,7 +2745,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e random_scalar_order(&sc[i]); secp256k1_ge_set_infinity(&pt[i]); } - CHECK(ecmult_multi(&ctx->ecmult_ctx,
scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); CHECK(secp256k1_gej_is_infinity(&r)); } @@ -2635,7 +2755,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e pt[i] = ptg; secp256k1_scalar_set_int(&sc[i], 0); } - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); CHECK(secp256k1_gej_is_infinity(&r)); } @@ -2648,7 +2768,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e pt[2 * i + 1] = ptg; } - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); CHECK(secp256k1_gej_is_infinity(&r)); random_scalar_order(&sc[0]); @@ -2661,7 +2781,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e secp256k1_ge_neg(&pt[2*i+1], &pt[2*i]); } - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, sizes[j])); CHECK(secp256k1_gej_is_infinity(&r)); } @@ -2676,7 +2796,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e secp256k1_scalar_negate(&sc[i], &sc[i]); } - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 32)); CHECK(secp256k1_gej_is_infinity(&r)); } @@ -2695,7 +2815,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e } secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &r, &sc[0], &szero); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); @@ -2718,7 +2838,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e secp256k1_gej_set_ge(&p0j, &pt[0]); secp256k1_ecmult(&ctx->ecmult_ctx, &r2, &p0j, &rs, &szero); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); secp256k1_gej_neg(&r2, &r2); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); @@ -2731,13 +2851,13 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e } secp256k1_scalar_clear(&sc[0]); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 20)); secp256k1_scalar_clear(&sc[1]); secp256k1_scalar_clear(&sc[2]); secp256k1_scalar_clear(&sc[3]); secp256k1_scalar_clear(&sc[4]); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6)); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 
5)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 6)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &szero, ecmult_multi_callback, &data, 5)); CHECK(secp256k1_gej_is_infinity(&r)); /* Run through s0*(t0*P) + s1*(t1*P) exhaustively for many small values of s0, s1, t0, t1 */ @@ -2782,7 +2902,7 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e secp256k1_scalar_add(&tmp1, &tmp1, &tmp2); secp256k1_ecmult(&ctx->ecmult_ctx, &expected, &ptgj, &tmp1, &szero); - CHECK(ecmult_multi(&ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2)); + CHECK(ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &actual, &szero, ecmult_multi_callback, &data, 2)); secp256k1_gej_neg(&expected, &expected); secp256k1_gej_add_var(&actual, &actual, &expected, NULL); CHECK(secp256k1_gej_is_infinity(&actual)); @@ -2793,6 +2913,24 @@ void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi_func e } } +void test_ecmult_multi_batch_single(secp256k1_ecmult_multi_func ecmult_multi) { + secp256k1_scalar szero; + secp256k1_scalar sc[32]; + secp256k1_ge pt[32]; + secp256k1_gej r; + ecmult_multi_data data; + secp256k1_scratch *scratch_empty; + + data.sc = sc; + data.pt = pt; + secp256k1_scalar_set_int(&szero, 0); + + /* Try to multiply 1 point, but scratch space is empty.*/ + scratch_empty = secp256k1_scratch_create(&ctx->error_callback, 0); + CHECK(!ecmult_multi(&ctx->error_callback, &ctx->ecmult_ctx, scratch_empty, &r, &szero, ecmult_multi_callback, &data, 1)); + secp256k1_scratch_destroy(&ctx->error_callback, scratch_empty); +} + void test_secp256k1_pippenger_bucket_window_inv(void) { int i; @@ -2823,21 +2961,75 @@ void test_ecmult_multi_pippenger_max_points(void) { int bucket_window = 0; for(; scratch_size < max_size; scratch_size+=256) { + size_t i; + size_t total_alloc; + size_t checkpoint; scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size); CHECK(scratch != NULL); - n_points_supported = secp256k1_pippenger_max_points(scratch); + checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch); + n_points_supported = secp256k1_pippenger_max_points(&ctx->error_callback, scratch); if (n_points_supported == 0) { - secp256k1_scratch_destroy(scratch); + secp256k1_scratch_destroy(&ctx->error_callback, scratch); continue; } bucket_window = secp256k1_pippenger_bucket_window(n_points_supported); - CHECK(secp256k1_scratch_allocate_frame(scratch, secp256k1_pippenger_scratch_size(n_points_supported, bucket_window), PIPPENGER_SCRATCH_OBJECTS)); - secp256k1_scratch_deallocate_frame(scratch); - secp256k1_scratch_destroy(scratch); + /* allocate `total_alloc` bytes over `PIPPENGER_SCRATCH_OBJECTS` many allocations */ + total_alloc = secp256k1_pippenger_scratch_size(n_points_supported, bucket_window); + for (i = 0; i < PIPPENGER_SCRATCH_OBJECTS - 1; i++) { + CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, 1)); + total_alloc--; + } + CHECK(secp256k1_scratch_alloc(&ctx->error_callback, scratch, total_alloc)); + secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, checkpoint); + secp256k1_scratch_destroy(&ctx->error_callback, scratch); } CHECK(bucket_window == PIPPENGER_MAX_BUCKET_WINDOW); } +void test_ecmult_multi_batch_size_helper(void) { + size_t n_batches, n_batch_points, max_n_batch_points, n; + + max_n_batch_points = 0; + n = 1; + CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, 
max_n_batch_points, n) == 0); + + max_n_batch_points = 1; + n = 0; + CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(n_batches == 0); + CHECK(n_batch_points == 0); + + max_n_batch_points = 2; + n = 5; + CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(n_batches == 3); + CHECK(n_batch_points == 2); + + max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH; + n = ECMULT_MAX_POINTS_PER_BATCH; + CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(n_batches == 1); + CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH); + + max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH + 1; + n = ECMULT_MAX_POINTS_PER_BATCH + 1; + CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(n_batches == 2); + CHECK(n_batch_points == ECMULT_MAX_POINTS_PER_BATCH/2 + 1); + + max_n_batch_points = 1; + n = SIZE_MAX; + CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(n_batches == SIZE_MAX); + CHECK(n_batch_points == 1); + + max_n_batch_points = 2; + n = SIZE_MAX; + CHECK(secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, max_n_batch_points, n) == 1); + CHECK(n_batches == SIZE_MAX/2 + 1); + CHECK(n_batch_points == 2); +} + /** * Run secp256k1_ecmult_multi_var with num points and a scratch space restricted to * 1 <= i <= num points. @@ -2872,19 +3064,25 @@ void test_ecmult_multi_batching(void) { } data.sc = sc; data.pt = pt; + secp256k1_gej_neg(&r2, &r2); - /* Test with empty scratch space */ + /* Test with empty scratch space. It should compute the correct result using + * ecmult_mult_simple algorithm which doesn't require a scratch space. */ scratch = secp256k1_scratch_create(&ctx->error_callback, 0); - CHECK(!secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, 1)); - secp256k1_scratch_destroy(scratch); + CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + secp256k1_gej_add_var(&r, &r, &r2, NULL); + CHECK(secp256k1_gej_is_infinity(&r)); + secp256k1_scratch_destroy(&ctx->error_callback, scratch); /* Test with space for 1 point in pippenger. That's not enough because - * ecmult_multi selects strauss which requires more memory. */ + * ecmult_multi selects strauss which requires more memory. It should + * therefore select the simple algorithm. 
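The CHECKs above pin down the helper's arithmetic: it fails only when max_n_batch_points is 0, caps the batch size at ECMULT_MAX_POINTS_PER_BATCH, and otherwise performs two ceiling divisions written so they cannot overflow for n near SIZE_MAX. A reconstruction consistent with those expectations (hedged; the in-tree implementation may differ in detail):

```c
static int batch_size_helper_sketch(size_t *n_batches, size_t *n_batch_points,
                                    size_t max_n_batch_points, size_t n) {
    if (max_n_batch_points == 0) {
        return 0;
    }
    if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) {
        max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
    }
    if (n == 0) {
        *n_batches = 0;
        *n_batch_points = 0;
        return 1;
    }
    /* ceil(n / max) and ceil(n / n_batches), written without n + x so the
     * SIZE_MAX cases above cannot overflow. */
    *n_batches = 1 + (n - 1) / max_n_batch_points;
    *n_batch_points = 1 + (n - 1) / *n_batches;
    return 1;
}
```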
*/ scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_pippenger_scratch_size(1, 1) + PIPPENGER_SCRATCH_OBJECTS*ALIGNMENT); - CHECK(!secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, 1)); - secp256k1_scratch_destroy(scratch); + CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + secp256k1_gej_add_var(&r, &r, &r2, NULL); + CHECK(secp256k1_gej_is_infinity(&r)); + secp256k1_scratch_destroy(&ctx->error_callback, scratch); - secp256k1_gej_neg(&r2, &r2); for(i = 1; i <= n_points; i++) { if (i > ECMULT_PIPPENGER_THRESHOLD) { int bucket_window = secp256k1_pippenger_bucket_window(i); @@ -2894,10 +3092,10 @@ void test_ecmult_multi_batching(void) { size_t scratch_size = secp256k1_strauss_scratch_size(i); scratch = secp256k1_scratch_create(&ctx->error_callback, scratch_size + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); } - CHECK(secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); + CHECK(secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &r, &scG, ecmult_multi_callback, &data, n_points)); secp256k1_gej_add_var(&r, &r, &r2, NULL); CHECK(secp256k1_gej_is_infinity(&r)); - secp256k1_scratch_destroy(scratch); + secp256k1_scratch_destroy(&ctx->error_callback, scratch); } free(sc); free(pt); @@ -2910,15 +3108,19 @@ void run_ecmult_multi_tests(void) { test_ecmult_multi_pippenger_max_points(); scratch = secp256k1_scratch_create(&ctx->error_callback, 819200); test_ecmult_multi(scratch, secp256k1_ecmult_multi_var); + test_ecmult_multi(NULL, secp256k1_ecmult_multi_var); test_ecmult_multi(scratch, secp256k1_ecmult_pippenger_batch_single); + test_ecmult_multi_batch_single(secp256k1_ecmult_pippenger_batch_single); test_ecmult_multi(scratch, secp256k1_ecmult_strauss_batch_single); - secp256k1_scratch_destroy(scratch); + test_ecmult_multi_batch_single(secp256k1_ecmult_strauss_batch_single); + secp256k1_scratch_destroy(&ctx->error_callback, scratch); /* Run test_ecmult_multi with space for exactly one point */ scratch = secp256k1_scratch_create(&ctx->error_callback, secp256k1_strauss_scratch_size(1) + STRAUSS_SCRATCH_OBJECTS*ALIGNMENT); test_ecmult_multi(scratch, secp256k1_ecmult_multi_var); - secp256k1_scratch_destroy(scratch); + secp256k1_scratch_destroy(&ctx->error_callback, scratch); + test_ecmult_multi_batch_size_helper(); test_ecmult_multi_batching(); } @@ -2988,7 +3190,7 @@ void test_constant_wnaf(const secp256k1_scalar *number, int w) { } bits = 128; #endif - skew = secp256k1_wnaf_const(wnaf, num, w, bits); + skew = secp256k1_wnaf_const(wnaf, &num, w, bits); for (i = WNAF_SIZE_BITS(bits, w); i >= 0; --i) { secp256k1_scalar t; @@ -4978,8 +5180,9 @@ int main(int argc, char **argv) { } } else { FILE *frand = fopen("/dev/urandom", "r"); - if ((frand == NULL) || fread(&seed16, sizeof(seed16), 1, frand) != sizeof(seed16)) { + if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) { uint64_t t = time(NULL) * (uint64_t)1337; + fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n"); seed16[0] ^= t; seed16[1] ^= t >> 8; seed16[2] ^= t >> 16; @@ -4999,7 +5202,8 @@ int main(int argc, char **argv) { printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], 
seed16[13], seed16[14], seed16[15]); /* initialize */ - run_context_tests(); + run_context_tests(0); + run_context_tests(1); run_scratch_tests(); ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); if (secp256k1_rand_bits(1)) { diff --git a/depend/secp256k1/src/tests_exhaustive.c b/depend/secp256k1/src/tests_exhaustive.c index ab9779b02..b44e357cb 100644 --- a/depend/secp256k1/src/tests_exhaustive.c +++ b/depend/secp256k1/src/tests_exhaustive.c @@ -212,14 +212,14 @@ void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ data.pt[0] = group[x]; data.pt[1] = group[y]; - secp256k1_ecmult_multi_var(&ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); + secp256k1_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2); ge_equals_gej(&group[(i * x + j * y + k) % order], &tmp); } } } } } - secp256k1_scratch_destroy(scratch); + secp256k1_scratch_destroy(&ctx->error_callback, scratch); } void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) { diff --git a/depend/secp256k1/src/util.h b/depend/secp256k1/src/util.h index e0147500f..9deb61bc5 100644 --- a/depend/secp256k1/src/util.h +++ b/depend/secp256k1/src/util.h @@ -36,7 +36,7 @@ static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback * } while(0) #endif -#ifdef HAVE_BUILTIN_EXPECT +#if SECP256K1_GNUC_PREREQ(3, 0) #define EXPECT(x,c) __builtin_expect((x),(c)) #else #define EXPECT(x,c) (x) @@ -84,6 +84,47 @@ static SECP256K1_INLINE void *checked_realloc(const secp256k1_callback* cb, void return ret; } +#if defined(__BIGGEST_ALIGNMENT__) +#define ALIGNMENT __BIGGEST_ALIGNMENT__ +#else +/* Using 16 bytes alignment because common architectures never have alignment + * requirements above 8 for any of the types we care about. In addition we + * leave some room because currently we don't care about a few bytes. */ +#define ALIGNMENT 16 +#endif + +#define ROUND_TO_ALIGN(size) (((size + ALIGNMENT - 1) / ALIGNMENT) * ALIGNMENT) + +/* Assume there is a contiguous memory object with bounds [base, base + max_size) + * of which the memory range [base, *prealloc_ptr) is already allocated for usage, + * where *prealloc_ptr is an aligned pointer. In that setting, this function + * reserves the subobject [*prealloc_ptr, *prealloc_ptr + alloc_size) of + * alloc_size bytes by increasing *prealloc_ptr accordingly, taking into account + * alignment requirements. + * + * The function returns an aligned pointer to the newly allocated subobject. + * + * This is useful for manual memory management: if we're simply given a block + * [base, base + max_size), the caller can use this function to allocate memory + * in this block and keep track of the current allocation state with *prealloc_ptr. + * + * It is VERIFY_CHECKed that there is enough space left in the memory object and + * *prealloc_ptr is aligned relative to base.
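In practice the contract described here lets one malloc'd block be sliced into several aligned sub-objects, which is how the preallocated-context code above hands out space for the ecmult tables. A hypothetical two-object sketch (obj_a and obj_b are illustrative types, not library ones):

```c
#include <stdlib.h>

typedef struct { int v[3]; } obj_a;    /* hypothetical payload types */
typedef struct { double d; } obj_b;

void carve_one_block(void) {
    size_t total = ROUND_TO_ALIGN(sizeof(obj_a)) + ROUND_TO_ALIGN(sizeof(obj_b));
    void *base = malloc(total);
    void *prealloc = base;             /* cursor that manual_alloc advances */
    obj_a *a = (obj_a *)manual_alloc(&prealloc, sizeof(obj_a), base, total);
    obj_b *b = (obj_b *)manual_alloc(&prealloc, sizeof(obj_b), base, total);
    /* a and b are aligned and live inside [base, base + total) */
    (void)a; (void)b;
    free(base);                        /* one free releases both */
}
```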
+ */ +static SECP256K1_INLINE void *manual_alloc(void** prealloc_ptr, size_t alloc_size, void* base, size_t max_size) { + size_t aligned_alloc_size = ROUND_TO_ALIGN(alloc_size); + void* ret; + VERIFY_CHECK(prealloc_ptr != NULL); + VERIFY_CHECK(*prealloc_ptr != NULL); + VERIFY_CHECK(base != NULL); + VERIFY_CHECK((unsigned char*)*prealloc_ptr >= (unsigned char*)base); + VERIFY_CHECK(((unsigned char*)*prealloc_ptr - (unsigned char*)base) % ALIGNMENT == 0); + VERIFY_CHECK((unsigned char*)*prealloc_ptr - (unsigned char*)base + aligned_alloc_size <= max_size); + ret = *prealloc_ptr; + *((unsigned char**)prealloc_ptr) += aligned_alloc_size; + return ret; +} + /* Macro for restrict, when available and not in a VERIFY build. */ #if defined(SECP256K1_BUILD) && defined(VERIFY) # define SECP256K1_RESTRICT diff --git a/src/ffi.rs b/src/ffi.rs index e84ad6c41..7084dd829 100644 --- a/src/ffi.rs +++ b/src/ffi.rs @@ -144,10 +144,16 @@ extern "C" { // Contexts pub fn secp256k1_context_create(flags: c_uint) -> *mut Context; + pub fn secp256k1_context_preallocated_size(flags: c_uint) -> usize; + + pub fn secp256k1_context_preallocated_create(prealloc: *mut c_void, flags: c_uint) -> *mut Context; + pub fn secp256k1_context_clone(cx: *mut Context) -> *mut Context; pub fn secp256k1_context_destroy(cx: *mut Context); + pub fn secp256k1_context_preallocated_destroy(cx: *mut Context); + pub fn secp256k1_context_randomize(cx: *mut Context, seed32: *const c_uchar) -> c_int; @@ -255,6 +261,67 @@ extern "C" { ) -> c_int; } + +#[no_mangle] +/// **This function is an override for the C function; this is an edited version of the original description:** +/// +/// A callback function to be called when an illegal argument is passed to +/// an API call. It will only trigger for violations that are mentioned +/// explicitly in the header. **This will cause a panic**. +/// +/// The philosophy is that these shouldn't be dealt with through a +/// specific return value, as calling code should not have branches to deal with +/// the case that this code itself is broken. +/// +/// On the other hand, during the debug stage, one would want to be informed about +/// such mistakes, and the default (crashing) may be inadvisable. +/// When this callback is triggered, the API function called is guaranteed not +/// to cause a crash, though its return value and output arguments are +/// undefined. +/// +/// See also secp256k1_default_error_callback_fn. +/// +pub unsafe extern "C" fn secp256k1_default_illegal_callback_fn(message: *const c_char, _data: *mut c_void) { + use core::{str, slice}; + let msg_slice = slice::from_raw_parts(message as *const u8, strlen(message)); + let msg = str::from_utf8_unchecked(msg_slice); + panic!("[libsecp256k1] illegal argument. {}", msg); +} + +#[no_mangle] +/// **This function is an override for the C function; this is an edited version of the original description:** +/// +/// A callback function to be called when an internal consistency check +/// fails. **This will cause a panic**. +/// +/// This can only trigger in case of a hardware failure, miscompilation, +/// memory corruption, serious bug in the library, or other error that could +/// otherwise result in undefined behaviour. It will not trigger due to mere +/// incorrect usage of the API (see secp256k1_default_illegal_callback_fn +/// for that). After this callback returns, anything may happen, including +/// crashing. +/// +/// See also secp256k1_default_illegal_callback_fn.
+/// +pub unsafe extern "C" fn secp256k1_default_error_callback_fn(message: *const c_char, _data: *mut c_void) { + // Dereference and print the message; without std we need our own `strlen` to find its length. + use core::{str, slice}; + let msg_slice = slice::from_raw_parts(message as *const u8, strlen(message)); + let msg = str::from_utf8_unchecked(msg_slice); + panic!("[libsecp256k1] internal consistency check failed {}", msg); +} + + +unsafe fn strlen(mut str_ptr: *const c_char) -> usize { + let mut ctr = 0; + while *str_ptr != '\0' as c_char { + ctr += 1; + str_ptr = str_ptr.offset(1); + } + ctr +} + + #[cfg(feature = "fuzztarget")] mod fuzz_dummy { extern crate std; @@ -602,3 +669,18 @@ mod fuzz_dummy { } #[cfg(feature = "fuzztarget")] pub use self::fuzz_dummy::*; + + +#[cfg(test)] +mod tests { + use std::ffi::CString; + use super::strlen; + + #[test] + fn test_strlen() { + let orig = "test strlen \t \n"; + let test = CString::new(orig).unwrap(); + + assert_eq!(orig.len(), unsafe {strlen(test.as_ptr())}); + } +} \ No newline at end of file diff --git a/src/lib.rs b/src/lib.rs index 0947ce74c..98aec93e5 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -159,6 +159,8 @@ pub use key::SecretKey; pub use key::PublicKey; use core::marker::PhantomData; use core::ops::Deref; +use self::types::c_uint; +use types::c_void; /// An ECDSA signature #[derive(Copy, Clone, PartialEq, Eq)] @@ -463,6 +465,8 @@ pub enum Error { InvalidRecoveryId, /// Invalid tweak for add_*_assign or mul_*_assign InvalidTweak, + /// Didn't pass enough memory to `new_preallocated_internal` + NotEnoughMemory, } impl Error { @@ -475,6 +479,7 @@ impl Error { Error::InvalidSecretKey => "secp: malformed or out-of-range secret key", Error::InvalidRecoveryId => "secp: bad recovery id", Error::InvalidTweak => "secp: bad tweak", + Error::NotEnoughMemory => "secp: not enough memory allocated", } } } @@ -513,26 +518,29 @@ impl Verification for VerifyOnly {} impl Verification for All {} /// The secp256k1 engine, used to execute all signature operations -pub struct Secp256k1<C> { +pub struct Secp256k1<'buf, C> { ctx: *mut ffi::Context, - phantom: PhantomData<C> + phantom: PhantomData<C>, + _buf: Option<&'buf [u8]>, } // The underlying secp context does not contain any references to memory it does not own -unsafe impl<C> Send for Secp256k1<C> {} +unsafe impl<'buf, C> Send for Secp256k1<'buf, C> {} // The API does not permit any mutation of `Secp256k1` objects except through `&mut` references -unsafe impl<C> Sync for Secp256k1<C> {} +unsafe impl<'buf, C> Sync for Secp256k1<'buf, C> {} -impl<C> Clone for Secp256k1<C> { - fn clone(&self) -> Secp256k1<C> { +#[cfg(feature = "std")] +impl<'buf, C> Clone for Secp256k1<'buf, C> { + fn clone(&self) -> Secp256k1<'static, C> { Secp256k1 { ctx: unsafe { ffi::secp256k1_context_clone(self.ctx) }, - phantom: self.phantom + phantom: self.phantom, + _buf: None, } } } -impl<C> PartialEq for Secp256k1<C> { +impl<'buf, C> PartialEq for Secp256k1<'buf, C> { fn eq(&self, _other: &Secp256k1) -> bool { true } } @@ -566,60 +574,90 @@ impl Deref for SerializedSignature { impl Eq for SerializedSignature {} -impl<C> Eq for Secp256k1<C> { } +impl<'buf, C> Eq for Secp256k1<'buf, C> { } -impl<C> Drop for Secp256k1<C> { +impl<'buf, C> Drop for Secp256k1<'buf, C> { fn drop(&mut self) { - unsafe { ffi::secp256k1_context_destroy(self.ctx); } + match self._buf { + None => unsafe { ffi::secp256k1_context_destroy(self.ctx) }, + Some(_) => unsafe { ffi::secp256k1_context_preallocated_destroy(self.ctx) }, + } } } -impl fmt::Debug for Secp256k1<SignOnly> { +impl<'buf> fmt::Debug for
diff --git a/src/lib.rs b/src/lib.rs
index 0947ce74c..98aec93e5 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -159,6 +159,8 @@ pub use key::SecretKey;
 pub use key::PublicKey;
 use core::marker::PhantomData;
 use core::ops::Deref;
+use self::types::c_uint;
+use types::c_void;
 
 /// An ECDSA signature
 #[derive(Copy, Clone, PartialEq, Eq)]
@@ -463,6 +465,8 @@ pub enum Error {
     InvalidRecoveryId,
     /// Invalid tweak for add_*_assign or mul_*_assign
     InvalidTweak,
+    /// Didn't pass enough memory to `new_preallocated_internal`
+    NotEnoughMemory,
 }
 
 impl Error {
@@ -475,6 +479,7 @@ impl Error {
             Error::InvalidSecretKey => "secp: malformed or out-of-range secret key",
             Error::InvalidRecoveryId => "secp: bad recovery id",
             Error::InvalidTweak => "secp: bad tweak",
+            Error::NotEnoughMemory => "secp: not enough memory allocated",
         }
     }
 }
@@ -513,26 +518,29 @@ impl Verification for VerifyOnly {}
 impl Verification for All {}
 
 /// The secp256k1 engine, used to execute all signature operations
-pub struct Secp256k1<C> {
+pub struct Secp256k1<'buf, C> {
     ctx: *mut ffi::Context,
-    phantom: PhantomData<C>
+    phantom: PhantomData<C>,
+    _buf: Option<&'buf [u8]>,
 }
 
 // The underlying secp context does not contain any references to memory it does not own
-unsafe impl<C> Send for Secp256k1<C> {}
+unsafe impl<'buf, C> Send for Secp256k1<'buf, C> {}
 // The API does not permit any mutation of `Secp256k1` objects except through `&mut` references
-unsafe impl<C> Sync for Secp256k1<C> {}
+unsafe impl<'buf, C> Sync for Secp256k1<'buf, C> {}
 
-impl<C> Clone for Secp256k1<C> {
-    fn clone(&self) -> Secp256k1<C> {
+#[cfg(feature = "std")]
+impl<'buf, C> Clone for Secp256k1<'buf, C> {
+    fn clone(&self) -> Secp256k1<'static, C> {
         Secp256k1 {
             ctx: unsafe { ffi::secp256k1_context_clone(self.ctx) },
-            phantom: self.phantom
+            phantom: self.phantom,
+            _buf: None,
         }
     }
 }
 
-impl<C> PartialEq for Secp256k1<C> {
+impl<'buf, C> PartialEq for Secp256k1<'buf, C> {
     fn eq(&self, _other: &Secp256k1<'buf, C>) -> bool { true }
 }
@@ -566,60 +574,90 @@ impl Deref for SerializedSignature {
 
 impl Eq for SerializedSignature {}
 
-impl<C> Eq for Secp256k1<C> { }
+impl<'buf, C> Eq for Secp256k1<'buf, C> { }
 
-impl<C> Drop for Secp256k1<C> {
+impl<'buf, C> Drop for Secp256k1<'buf, C> {
     fn drop(&mut self) {
-        unsafe { ffi::secp256k1_context_destroy(self.ctx); }
+        match self._buf {
+            None => unsafe { ffi::secp256k1_context_destroy(self.ctx) },
+            Some(_) => unsafe { ffi::secp256k1_context_preallocated_destroy(self.ctx) },
+        }
     }
 }
 
-impl fmt::Debug for Secp256k1<SignOnly> {
+impl<'buf> fmt::Debug for Secp256k1<'buf, SignOnly> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         write!(f, "<secp256k1 context {:?}, signing only>", self.ctx)
     }
 }
 
-impl fmt::Debug for Secp256k1<VerifyOnly> {
+impl<'buf> fmt::Debug for Secp256k1<'buf, VerifyOnly> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         write!(f, "<secp256k1 context {:?}, verification only>", self.ctx)
     }
 }
 
-impl fmt::Debug for Secp256k1<All> {
+impl<'buf> fmt::Debug for Secp256k1<'buf, All> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         write!(f, "<secp256k1 context {:?}, all capabilities>", self.ctx)
     }
 }
 
-impl Secp256k1<All> {
+impl<'buf> Secp256k1<'buf, All> {
+    const FLAGS: c_uint = ffi::SECP256K1_START_SIGN | ffi::SECP256K1_START_VERIFY;
+
     /// Creates a new Secp256k1 context with all capabilities
-    pub fn new() -> Secp256k1<All> {
-        Secp256k1 { ctx: unsafe { ffi::secp256k1_context_create(ffi::SECP256K1_START_SIGN | ffi::SECP256K1_START_VERIFY) }, phantom: PhantomData }
+    pub fn new() -> Secp256k1<'buf, All> {
+        Secp256k1 { ctx: unsafe { ffi::secp256k1_context_create(Self::FLAGS) }, phantom: PhantomData, _buf: None }
+    }
+
+    /// Returns the buffer size (in bytes) required by `new_preallocated`
+    pub fn get_preallocated_size() -> usize {
+        Self::get_preallocated_size_internal(Self::FLAGS)
+    }
+
+    /// Creates a new Secp256k1 context with all capabilities inside the caller-provided buffer
+    pub fn new_preallocated(buf: &'buf mut [u8]) -> Result<Secp256k1<'buf, All>, Error> {
+        Self::new_preallocated_internal(buf, Self::FLAGS)
     }
 }
 
-impl Default for Secp256k1<All> {
+impl<'buf> Default for Secp256k1<'buf, All> {
     fn default() -> Self {
         Self::new()
     }
 }
 
-impl Secp256k1<SignOnly> {
+impl<'buf> Secp256k1<'buf, SignOnly> {
+    const FLAGS: c_uint = ffi::SECP256K1_START_SIGN;
+
     /// Creates a new Secp256k1 context that can only be used for signing
-    pub fn signing_only() -> Secp256k1<SignOnly> {
-        Secp256k1 { ctx: unsafe { ffi::secp256k1_context_create(ffi::SECP256K1_START_SIGN) }, phantom: PhantomData }
+    pub fn signing_only() -> Secp256k1<'buf, SignOnly> {
+        Secp256k1 { ctx: unsafe { ffi::secp256k1_context_create(Self::FLAGS) }, phantom: PhantomData, _buf: None }
+    }
+
+    /// Returns the buffer size (in bytes) required by `new_preallocated_signing_only`
+    pub fn get_preallocated_signing_only_size() -> usize {
+        Self::get_preallocated_size_internal(Self::FLAGS)
+    }
+
+    /// Creates a new signing-only Secp256k1 context inside the caller-provided buffer
+    pub fn new_preallocated_signing_only(buf: &'buf mut [u8]) -> Result<Secp256k1<'buf, SignOnly>, Error> {
+        Self::new_preallocated_internal(buf, Self::FLAGS)
    }
 }
 
-impl Secp256k1<VerifyOnly> {
+impl<'buf> Secp256k1<'buf, VerifyOnly> {
+    const FLAGS: c_uint = ffi::SECP256K1_START_VERIFY;
+
     /// Creates a new Secp256k1 context that can only be used for verification
-    pub fn verification_only() -> Secp256k1<VerifyOnly> {
-        Secp256k1 { ctx: unsafe { ffi::secp256k1_context_create(ffi::SECP256K1_START_VERIFY) }, phantom: PhantomData }
+    pub fn verification_only() -> Secp256k1<'buf, VerifyOnly> {
+        Secp256k1 { ctx: unsafe { ffi::secp256k1_context_create(Self::FLAGS) }, phantom: PhantomData, _buf: None }
+    }
+
+    /// Returns the buffer size (in bytes) required by `new_preallocated_verification_only`
+    pub fn get_preallocated_verification_only_size() -> usize {
+        Self::get_preallocated_size_internal(Self::FLAGS)
+    }
+
+    /// Creates a new verification-only Secp256k1 context inside the caller-provided buffer
+    pub fn new_preallocated_verification_only(buf: &'buf mut [u8]) -> Result<Secp256k1<'buf, VerifyOnly>, Error> {
+        Self::new_preallocated_internal(buf, Self::FLAGS)
     }
 }
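A hedged usage sketch of the new constructors (mirroring the crate's own test further down; the buffer sizes are always queried from the library rather than hard-coded, and `vec!` assumes `std`):

```rust
use secp256k1::Secp256k1;

fn preallocated_demo() {
    // Each capability class has its own size; always ask, never hard-code.
    let mut all_buf = vec![0u8; Secp256k1::get_preallocated_size()];
    let mut sign_buf = vec![0u8; Secp256k1::get_preallocated_signing_only_size()];

    // The context borrows its buffer for 'buf, so the borrow checker keeps
    // `all_buf` alive (and un-aliased) for as long as `full` exists.
    let full = Secp256k1::new_preallocated(&mut all_buf).expect("sized to fit");
    let sign_only = Secp256k1::new_preallocated_signing_only(&mut sign_buf).expect("sized to fit");

    // Drop runs secp256k1_context_preallocated_destroy for these contexts,
    // while heap contexts from new()/signing_only() use secp256k1_context_destroy.
    drop(full);
    drop(sign_only);
}
```

Keeping `_buf` as an `Option` is what lets a single `Drop` impl serve both allocation strategies: `None` marks a context the C library allocated itself, `Some` marks one living in caller-provided memory.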
-impl<C> Secp256k1<C> {
+impl<'buf, C> Secp256k1<'buf, C> {
     /// Getter for the raw pointer to the underlying secp256k1 context. This
     /// shouldn't be needed with normal usage of the library. It enables
@@ -650,9 +688,25 @@ impl<C> Secp256k1<C> {
         }
     }
 
+    /// Returns the buffer size (in bytes) that a preallocated context with `flags` requires
+    pub(crate) fn get_preallocated_size_internal(flags: c_uint) -> usize {
+        unsafe { ffi::secp256k1_context_preallocated_size(flags) }
+    }
+
+    /// Creates a context inside `buf`, failing with `Error::NotEnoughMemory` if the buffer is too small
+    pub(crate) fn new_preallocated_internal(buf: &'buf mut [u8], flags: c_uint) -> Result<Secp256k1<'buf, C>, Error> {
+        if buf.len() < Self::get_preallocated_size_internal(flags) {
+            return Err(Error::NotEnoughMemory);
+        }
+
+        Ok(Secp256k1 {
+            ctx: unsafe { ffi::secp256k1_context_preallocated_create(buf.as_mut_ptr() as *mut c_void, flags) },
+            phantom: PhantomData,
+            _buf: Some(buf)
+        })
+    }
+
 }
 
-impl<C: Signing> Secp256k1<C> {
+impl<'buf, C: Signing> Secp256k1<'buf, C> {
 
     /// Constructs a signature for `msg` using the secret key `sk` and RFC6979 nonce
     /// Requires a signing-capable context.
@@ -685,7 +739,7 @@ impl<C: Signing> Secp256k1<C> {
     }
 }
 
-impl<C: Verification> Secp256k1<C> {
+impl<'buf, C: Verification> Secp256k1<'buf, C> {
     /// Checks that `sig` is a valid ECDSA signature for `msg` using the public
     /// key `pubkey`. Returns `Ok(true)` on success. Note that this function cannot
     /// be used for Bitcoin consensus checking since there may exist signatures
@@ -741,6 +795,7 @@ mod tests {
     use super::constants;
     use super::{Secp256k1, Signature, Message};
     use super::Error::{InvalidMessage, IncorrectSignature, InvalidSignature};
+    use ::{SignOnly, VerifyOnly, All};
 
     macro_rules! hex {
         ($hex:expr) => ({
@@ -756,6 +811,23 @@ mod tests {
         let vrfy = Secp256k1::verification_only();
         let full = Secp256k1::new();
 
+        capabilities_internal(sign, vrfy, full);
+
+    }
+
+    #[test]
+    fn preallocated_capabilities() {
+        let mut sign_buf = vec![0u8; Secp256k1::get_preallocated_signing_only_size()];
+        let mut verify_buf = vec![0u8; Secp256k1::get_preallocated_verification_only_size()];
+        let mut all_buf = vec![0u8; Secp256k1::get_preallocated_size()];
+        let sign: Secp256k1<SignOnly> = Secp256k1::new_preallocated_signing_only(&mut sign_buf).unwrap();
+        let vrfy: Secp256k1<VerifyOnly> = Secp256k1::new_preallocated_verification_only(&mut verify_buf).unwrap();
+        let full: Secp256k1<All> = Secp256k1::new_preallocated(&mut all_buf).unwrap();
+
+        capabilities_internal(sign, vrfy, full);
+    }
+
+    fn capabilities_internal(sign: Secp256k1<SignOnly>, vrfy: Secp256k1<VerifyOnly>, full: Secp256k1<All>) {
         let mut msg = [0u8; 32];
         thread_rng().fill_bytes(&mut msg);
         let msg = Message::from_slice(&msg).unwrap();
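One behavior worth highlighting: the length check in `new_preallocated_internal` runs before any FFI call, so an undersized buffer fails cleanly with `Error::NotEnoughMemory` rather than letting C write past the end of the slice. A small sketch of the expected error path (crate-external names as introduced above):

```rust
use secp256k1::{Secp256k1, Error};

fn undersized_buffer_demo() {
    // One byte is well below any real context size, so construction fails
    // before the buffer pointer ever reaches secp256k1_context_preallocated_create.
    let mut too_small = [0u8; 1];
    assert_eq!(
        Secp256k1::new_preallocated(&mut too_small).err(),
        Some(Error::NotEnoughMemory)
    );
}
```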
diff --git a/src/types.rs b/src/types.rs
index 4e12c90a1..3ed1ef661 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -5,6 +5,46 @@ pub type c_int = i32;
 pub type c_uchar = u8;
 pub type c_uint = u32;
 
+/// This is an exact copy of https://doc.rust-lang.org/src/std/os/raw/mod.rs.html
+/// `c_char` might be either signed or unsigned, depending on the platform;
+/// this should match the right type by using the same conditional compilation as stdlib.
+#[cfg(any(all(target_os = "linux", any(target_arch = "aarch64",
+                                       target_arch = "arm",
+                                       target_arch = "powerpc",
+                                       target_arch = "powerpc64",
+                                       target_arch = "s390x")),
+          all(target_os = "android", any(target_arch = "aarch64",
+                                         target_arch = "arm")),
+          all(target_os = "l4re", target_arch = "x86_64"),
+          all(target_os = "freebsd", any(target_arch = "aarch64",
+                                         target_arch = "arm",
+                                         target_arch = "powerpc",
+                                         target_arch = "powerpc64")),
+          all(target_os = "netbsd", any(target_arch = "aarch64",
+                                        target_arch = "arm",
+                                        target_arch = "powerpc")),
+          all(target_os = "openbsd", target_arch = "aarch64"),
+          all(target_os = "fuchsia", target_arch = "aarch64")))]
+pub type c_char = u8;
+#[cfg(not(any(all(target_os = "linux", any(target_arch = "aarch64",
+                                           target_arch = "arm",
+                                           target_arch = "powerpc",
+                                           target_arch = "powerpc64",
+                                           target_arch = "s390x")),
+              all(target_os = "android", any(target_arch = "aarch64",
+                                             target_arch = "arm")),
+              all(target_os = "l4re", target_arch = "x86_64"),
+              all(target_os = "freebsd", any(target_arch = "aarch64",
+                                             target_arch = "arm",
+                                             target_arch = "powerpc",
+                                             target_arch = "powerpc64")),
+              all(target_os = "netbsd", any(target_arch = "aarch64",
+                                            target_arch = "arm",
+                                            target_arch = "powerpc")),
+              all(target_os = "openbsd", target_arch = "aarch64"),
+              all(target_os = "fuchsia", target_arch = "aarch64"))))]
+pub type c_char = i8;
+
 /// This is an exact copy of https://doc.rust-lang.org/core/ffi/enum.c_void.html
 /// It should be equivalent to C's void type when used as a pointer.
 ///
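Since this table must track the platform ABI by hand, a cheap guard is possible; a hedged sketch (assuming the crate's `std` feature, which the `Clone` impl above already references, is enabled for tests) that fails to compile if the hand-rolled `c_char` ever diverges from the one `std` selects:

```rust
#[cfg(all(test, feature = "std"))]
mod c_char_sanity {
    use super::c_char;

    // Both arguments must unify to the same type T, so this compiles only
    // when the crate's c_char and std::os::raw::c_char are the same primitive.
    fn same_type<T>(_: T, _: T) {}

    #[test]
    fn matches_std() {
        same_type(0 as c_char, 0 as ::std::os::raw::c_char);
    }
}
```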