diff --git a/secp256k1-zkp-sys/build.rs b/secp256k1-zkp-sys/build.rs index 1f972b73..f64f20a4 100644 --- a/secp256k1-zkp-sys/build.rs +++ b/secp256k1-zkp-sys/build.rs @@ -44,6 +44,9 @@ fn main() { .define("ENABLE_MODULE_RANGEPROOF", Some("1")) .define("ENABLE_MODULE_ECDSA_ADAPTOR", Some("1")) .define("ENABLE_MODULE_WHITELIST", Some("1")) + .define("ENABLE_MODULE_EXTRAKEYS", Some("1")) + .define("ENABLE_MODULE_MUSIG", Some("1")) + .define("ENABLE_MODULE_SCHNORRSIG", Some("1")) .define("ECMULT_GEN_PREC_BITS", Some("4")) // TODO these three should be changed to use libgmp, at least until secp PR 290 is merged .define("USE_NUM_NONE", Some("1")) diff --git a/secp256k1-zkp-sys/depend/secp256k1-HEAD-revision.txt b/secp256k1-zkp-sys/depend/secp256k1-HEAD-revision.txt index a498b2f3..6f6a8b77 100644 --- a/secp256k1-zkp-sys/depend/secp256k1-HEAD-revision.txt +++ b/secp256k1-zkp-sys/depend/secp256k1-HEAD-revision.txt @@ -1,2 +1,2 @@ # This file was automatically created by ./vendor-libsecp.sh -f3708a1ecb445b1b05a0f8fcd1da6a88f83d89c4 +0c43aae082368084fc5ec5b3eeea4e4de232ea8f diff --git a/secp256k1-zkp-sys/depend/secp256k1/.cirrus.yml b/secp256k1-zkp-sys/depend/secp256k1/.cirrus.yml index 28a5d323..b1849af4 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/.cirrus.yml +++ b/secp256k1-zkp-sys/depend/secp256k1/.cirrus.yml @@ -1,14 +1,19 @@ env: - WIDEMUL: auto - BIGNUM: auto + ### compiler options + HOST: + # Specific warnings can be disabled with -Wno-error=foo. + # -pedantic-errors is not equivalent to -Werror=pedantic and thus not implied by -Werror according to the GCC manual. 
+ WERROR_CFLAGS: -Werror -pedantic-errors + MAKEFLAGS: -j2 + BUILD: check + ### secp256k1 config STATICPRECOMPUTATION: yes ECMULTGENPRECISION: auto ASM: no - BUILD: check + WIDEMUL: auto WITH_VALGRIND: yes - RUN_VALGRIND: no EXTRAFLAGS: - HOST: + ### secp256k1 modules ECDH: no RECOVERY: no SCHNORRSIG: no @@ -18,11 +23,11 @@ env: WHITELIST: no MUSIG: no ECDSAADAPTOR: no - EXPERIMENTAL: no - CTIMETEST: yes + ### test options + TEST_ITERS: BENCH: yes - ITERS: 2 - MAKEFLAGS: -j2 + BENCH_ITERS: 2 + CTIMETEST: yes cat_logs_snippet: &CAT_LOGS always: @@ -65,47 +70,13 @@ task: - env: {WIDEMUL: int128, RECOVERY: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes} - env: {WIDEMUL: int128, ECDH: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes, ECDSA_S2C: yes, RANGEPROOF: yes, WHITELIST: yes, GENERATOR: yes, MUSIG: yes, ECDSAADAPTOR: yes} - env: {WIDEMUL: int128, ASM: x86_64} - - env: {BIGNUM: no} - - env: {BIGNUM: no, RECOVERY: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes, ECDSA_S2C: yes, RANGEPROOF: yes, WHITELIST: yes, GENERATOR: yes, MUSIG: yes, ECDSAADAPTOR: yes} - - env: {BIGNUM: no, STATICPRECOMPUTATION: no} + - env: { RECOVERY: yes, EXPERIMENTAL: yes, SCHNORRSIG: yes, ECDSA_S2C: yes, RANGEPROOF: yes, WHITELIST: yes, GENERATOR: yes, MUSIG: yes, ECDSAADAPTOR: yes} + - env: { STATICPRECOMPUTATION: no} - env: {BUILD: distcheck, WITH_VALGRIND: no, CTIMETEST: no, BENCH: no} - env: {CPPFLAGS: -DDETERMINISTIC} - env: {CFLAGS: -O0, CTIMETEST: no} - - env: - CFLAGS: "-fsanitize=undefined -fno-omit-frame-pointer" - LDFLAGS: "-fsanitize=undefined -fno-omit-frame-pointer" - UBSAN_OPTIONS: "print_stacktrace=1:halt_on_error=1" - BIGNUM: no - ASM: x86_64 - ECDH: yes - RECOVERY: yes - EXPERIMENTAL: yes - SCHNORRSIG: yes - ECDSA_S2C: yes - RANGEPROOF: yes - WHITELIST: yes - GENERATOR: yes - MUSIG: yes - ECDSAADAPTOR: yes - CTIMETEST: no - env: { ECMULTGENPRECISION: 2 } - env: { ECMULTGENPRECISION: 8 } - - env: - RUN_VALGRIND: yes - BIGNUM: no - ASM: x86_64 - ECDH: yes - RECOVERY: yes - EXPERIMENTAL: yes - 
SCHNORRSIG: yes - ECDSA_S2C: yes - RANGEPROOF: yes - WHITELIST: yes - GENERATOR: yes - MUSIG: yes - ECDSAADAPTOR: yes - EXTRAFLAGS: "--disable-openssl-tests" - BUILD: matrix: - env: CC: gcc @@ -139,11 +110,6 @@ task: CC: i686-linux-gnu-gcc - env: CC: clang --target=i686-pc-linux-gnu -isystem /usr/i686-linux-gnu/include - matrix: - - env: - BIGNUM: gmp - - env: - BIGNUM: no << : *MERGE_BASE test_script: - ./ci/cirrus.sh @@ -153,8 +119,8 @@ task: name: "x86_64: macOS Catalina" macos_instance: image: catalina-base - # As of d4ca81f48e tasks with valgrind enabled take about 60 minutes - timeout_in: 90m + # tasks with valgrind enabled take about 90 minutes + timeout_in: 120m env: HOMEBREW_NO_AUTO_UPDATE: 1 HOMEBREW_NO_INSTALL_CLEANUP: 1 @@ -204,7 +170,7 @@ task: # If we haven't restored from cached (and just run brew install), this is a no-op. - brew link valgrind brew_script: - - brew install automake libtool gmp gcc@9 + - brew install automake libtool gcc@9 << : *MERGE_BASE test_script: - ./ci/cirrus.sh @@ -217,11 +183,10 @@ task: cpu: 1 memory: 1G env: - QEMU_CMD: qemu-s390x + WRAPPER_CMD: qemu-s390x + TEST_ITERS: 16 HOST: s390x-linux-gnu - BUILD: WITH_VALGRIND: no - BIGNUM: no ECDH: yes RECOVERY: yes EXPERIMENTAL: yes @@ -239,3 +204,165 @@ task: - rm /etc/ld.so.cache - ./ci/cirrus.sh << : *CAT_LOGS + +task: + name: "ARM32: Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-arm + TEST_ITERS: 16 + HOST: arm-linux-gnueabihf + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + matrix: + - env: {} + - env: {ASM: arm} + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "ARM64: Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-aarch64 + TEST_ITERS: 16 + HOST: aarch64-linux-gnu + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + 
EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "ppc64le: Linux (Debian stable, QEMU)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: qemu-ppc64le + TEST_ITERS: 16 + HOST: powerpc64le-linux-gnu + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "x86_64 (mingw32-w64): Windows (Debian stable, Wine)" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + WRAPPER_CMD: wine64-stable + TEST_ITERS: 16 + HOST: x86_64-w64-mingw32 + WITH_VALGRIND: no + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + CTIMETEST: no + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +# Sanitizers +task: + timeout_in: 120m + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 2G + env: + ECDH: yes + RECOVERY: yes + EXPERIMENTAL: yes + SCHNORRSIG: yes + ECDSA_S2C: yes + RANGEPROOF: yes + WHITELIST: yes + GENERATOR: yes + MUSIG: yes + ECDSAADAPTOR: yes + CTIMETEST: no + EXTRAFLAGS: "--disable-openssl-tests" + matrix: + - name: "Valgrind (memcheck)" + env: + # The `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html) + WRAPPER_CMD: "valgrind --error-exitcode=42" + TEST_ITERS: 8 + - name: "UBSan, ASan, LSan" + env: + CFLAGS: "-fsanitize=undefined,address" + CFLAGS_FOR_BUILD: "-fsanitize=undefined,address" + UBSAN_OPTIONS: "print_stacktrace=1:halt_on_error=1" + ASAN_OPTIONS: "strict_string_checks=1:detect_stack_use_after_return=1:detect_leaks=1" + LSAN_OPTIONS: "use_unaligned=1" + TEST_ITERS: 32 + # Try to cover many configurations with just a tiny matrix. 
+ matrix: + - env: + ASM: auto + STATICPRECOMPUTATION: yes + - env: + ASM: no + STATICPRECOMPUTATION: no + ECMULTGENPRECISION: 2 + matrix: + - env: + CC: clang + - env: + HOST: i686-linux-gnu + CC: i686-linux-gnu-gcc + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS + +task: + name: "C++ -fpermissive" + container: + dockerfile: ci/linux-debian.Dockerfile + cpu: 1 + memory: 1G + env: + # ./configure correctly errors out when given CC=g++. + # We hack around this by passing CC=g++ only to make. + CC: gcc + MAKEFLAGS: -j2 CC=g++ CFLAGS=-fpermissive + WERROR_CFLAGS: + EXPERIMENTAL: yes + ECDH: yes + RECOVERY: yes + SCHNORRSIG: yes + << : *MERGE_BASE + test_script: + - ./ci/cirrus.sh + << : *CAT_LOGS diff --git a/secp256k1-zkp-sys/depend/secp256k1/.gitignore b/secp256k1-zkp-sys/depend/secp256k1/.gitignore index cdfad486..f937d916 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/.gitignore +++ b/secp256k1-zkp-sys/depend/secp256k1/.gitignore @@ -25,6 +25,7 @@ aclocal.m4 autom4te.cache/ config.log config.status +conftest* *.tar.gz *.la libtool @@ -35,6 +36,14 @@ libtool *~ *.log *.trs + +coverage/ +coverage.html +coverage.*.html +*.gcda +*.gcno +*.gcov + src/libsecp256k1-config.h src/libsecp256k1-config.h.in src/ecmult_static_context.h @@ -54,3 +63,5 @@ build-aux/test-driver src/stamp-h1 libsecp256k1.pc contrib/gh-pr-create.sh + +example_musig \ No newline at end of file diff --git a/secp256k1-zkp-sys/depend/secp256k1/Makefile.am b/secp256k1-zkp-sys/depend/secp256k1/Makefile.am index 40c139c8..040776cd 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/Makefile.am +++ b/secp256k1-zkp-sys/depend/secp256k1/Makefile.am @@ -1,5 +1,9 @@ ACLOCAL_AMFLAGS = -I build-aux/m4 +# AM_CFLAGS will be automatically prepended to CFLAGS by Automake when compiling some foo +# which does not have an explicit foo_CFLAGS variable set. 
+AM_CFLAGS = $(SECP_CFLAGS) + lib_LTLIBRARIES = libsecp256k1.la include_HEADERS = include/secp256k1.h include_HEADERS += include/rustsecp256k1zkp_v0_4_0_preallocated.h @@ -14,8 +18,6 @@ noinst_HEADERS += src/scalar_8x32_impl.h noinst_HEADERS += src/scalar_low_impl.h noinst_HEADERS += src/group.h noinst_HEADERS += src/group_impl.h -noinst_HEADERS += src/num_gmp.h -noinst_HEADERS += src/num_gmp_impl.h noinst_HEADERS += src/eccommit.h noinst_HEADERS += src/eccommit_impl.h noinst_HEADERS += src/ecdsa.h @@ -28,14 +30,16 @@ noinst_HEADERS += src/ecmult_const.h noinst_HEADERS += src/ecmult_const_impl.h noinst_HEADERS += src/ecmult_gen.h noinst_HEADERS += src/ecmult_gen_impl.h -noinst_HEADERS += src/num.h -noinst_HEADERS += src/num_impl.h noinst_HEADERS += src/field_10x26.h noinst_HEADERS += src/field_10x26_impl.h noinst_HEADERS += src/field_5x52.h noinst_HEADERS += src/field_5x52_impl.h noinst_HEADERS += src/field_5x52_int128_impl.h noinst_HEADERS += src/field_5x52_asm_impl.h +noinst_HEADERS += src/modinv32.h +noinst_HEADERS += src/modinv32_impl.h +noinst_HEADERS += src/modinv64.h +noinst_HEADERS += src/modinv64_impl.h noinst_HEADERS += src/assumptions.h noinst_HEADERS += src/util.h noinst_HEADERS += src/scratch.h @@ -70,7 +74,7 @@ endif endif librustsecp256k1zkp_v0_4_0_la_SOURCES = src/secp256k1.c -librustsecp256k1zkp_v0_4_0_la_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES) +librustsecp256k1zkp_v0_4_0_la_CPPFLAGS = -I$(top_srcdir)/include -I$(top_srcdir)/src $(SECP_INCLUDES) librustsecp256k1zkp_v0_4_0_la_LIBADD = $(SECP_LIBS) $(COMMON_LIB) if VALGRIND_ENABLED @@ -83,27 +87,27 @@ noinst_PROGRAMS += bench_verify bench_sign bench_internal bench_ecmult bench_verify_SOURCES = src/bench_verify.c bench_verify_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) # SECP_TEST_INCLUDES are only used here for CRYPTO_CPPFLAGS -bench_verify_CPPFLAGS = -DSECP256K1_BUILD $(SECP_TEST_INCLUDES) +bench_verify_CPPFLAGS = 
$(SECP_TEST_INCLUDES) bench_sign_SOURCES = src/bench_sign.c bench_sign_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_TEST_LIBS) $(COMMON_LIB) bench_internal_SOURCES = src/bench_internal.c bench_internal_LDADD = $(SECP_LIBS) $(COMMON_LIB) -bench_internal_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES) +bench_internal_CPPFLAGS = $(SECP_INCLUDES) bench_ecmult_SOURCES = src/bench_ecmult.c bench_ecmult_LDADD = $(SECP_LIBS) $(COMMON_LIB) -bench_ecmult_CPPFLAGS = -DSECP256K1_BUILD $(SECP_INCLUDES) +bench_ecmult_CPPFLAGS = $(SECP_INCLUDES) endif TESTS = if USE_TESTS noinst_PROGRAMS += tests tests_SOURCES = src/tests.c -tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) +tests_CPPFLAGS = -I$(top_srcdir)/src -I$(top_srcdir)/include $(SECP_INCLUDES) $(SECP_TEST_INCLUDES) if VALGRIND_ENABLED tests_CPPFLAGS += -DVALGRIND noinst_PROGRAMS += valgrind_ctime_test valgrind_ctime_test_SOURCES = src/valgrind_ctime_test.c -valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(SECP_LIBS) $(COMMON_LIB) +valgrind_ctime_test_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB) endif if !ENABLE_COVERAGE tests_CPPFLAGS += -DVERIFY @@ -116,7 +120,7 @@ endif if USE_EXHAUSTIVE_TESTS noinst_PROGRAMS += exhaustive_tests exhaustive_tests_SOURCES = src/tests_exhaustive.c -exhaustive_tests_CPPFLAGS = -DSECP256K1_BUILD -I$(top_srcdir)/src $(SECP_INCLUDES) +exhaustive_tests_CPPFLAGS = -I$(top_srcdir)/src $(SECP_INCLUDES) if !ENABLE_COVERAGE exhaustive_tests_CPPFLAGS += -DVERIFY endif @@ -131,10 +135,10 @@ CPPFLAGS_FOR_BUILD +=-I$(top_srcdir) -I$(builddir)/src gen_context_OBJECTS = gen_context.o gen_context_BIN = gen_context$(BUILD_EXEEXT) gen_%.o: src/gen_%.c src/libsecp256k1-config.h - $(CC_FOR_BUILD) $(CPPFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@ + $(CC_FOR_BUILD) $(DEFS) $(CPPFLAGS_FOR_BUILD) $(SECP_CFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) -c $< -o $@ $(gen_context_BIN): $(gen_context_OBJECTS) - $(CC_FOR_BUILD) 
$(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@ + $(CC_FOR_BUILD) $(SECP_CFLAGS_FOR_BUILD) $(CFLAGS_FOR_BUILD) $(LDFLAGS_FOR_BUILD) $^ -o $@ $(librustsecp256k1zkp_v0_4_0_la_OBJECTS): src/ecmult_static_context.h $(tests_OBJECTS): src/ecmult_static_context.h diff --git a/secp256k1-zkp-sys/depend/secp256k1/README.md b/secp256k1-zkp-sys/depend/secp256k1/README.md index 43dc4238..f4ce2257 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/README.md +++ b/secp256k1-zkp-sys/depend/secp256k1/README.md @@ -17,6 +17,7 @@ Features: * Suitable for embedded systems. * Optional module for public key recovery. * Optional module for ECDH key exchange. +* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki) (experimental). * Optional module for ECDSA adaptor signatures (experimental). Experimental features have not received enough scrutiny to satisfy the standard of quality of this library but are made available for testing and review by the community. The APIs of these features should not be considered stable. @@ -35,11 +36,11 @@ Implementation details * Optimized implementation of arithmetic modulo the curve's field size (2^256 - 0x1000003D1). * Using 5 52-bit limbs (including hand-optimized assembly for x86_64, by Diederik Huys). * Using 10 26-bit limbs (including hand-optimized assembly for 32-bit ARM, by Wladimir J. van der Laan). - * Field inverses and square roots using a sliding window over blocks of 1s (by Peter Dettman). * Scalar operations * Optimized implementation without data-dependent branches of arithmetic modulo the curve's order. * Using 4 64-bit limbs (relying on __int128 support in the compiler). * Using 8 32-bit limbs. +* Modular inverses (both field elements and scalars) based on [safegcd](https://gcd.cr.yp.to/index.html) with some modifications, and a variable-time variant (by Peter Dettman). 
* Group operations * Point addition formula specifically simplified for the curve equation (y^2 = x^3 + 7). * Use addition between points in Jacobian and affine coordinates where possible. @@ -97,7 +98,8 @@ To create a report, `gcovr` is recommended, as it includes branch coverage repor To create a HTML report with coloured and annotated source code: - $ gcovr --exclude 'src/bench*' --html --html-details -o coverage.html + $ mkdir -p coverage + $ gcovr --exclude 'src/bench*' --html --html-details -o coverage/coverage.html Reporting a vulnerability ------------ diff --git a/secp256k1-zkp-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 b/secp256k1-zkp-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 index 7b48a5e5..8245b2b8 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 +++ b/secp256k1-zkp-sys/depend/secp256k1/build-aux/m4/bitcoin_secp.m4 @@ -75,19 +75,6 @@ if test x"$has_libcrypto" = x"yes" && test x"$has_openssl_ec" = x; then fi ]) -dnl -AC_DEFUN([SECP_GMP_CHECK],[ -if test x"$has_gmp" != x"yes"; then - CPPFLAGS_TEMP="$CPPFLAGS" - CPPFLAGS="$GMP_CPPFLAGS $CPPFLAGS" - LIBS_TEMP="$LIBS" - LIBS="$GMP_LIBS $LIBS" - AC_CHECK_HEADER(gmp.h,[AC_CHECK_LIB(gmp, __gmpz_init,[has_gmp=yes; GMP_LIBS="$GMP_LIBS -lgmp"; AC_DEFINE(HAVE_LIBGMP,1,[Define this symbol if libgmp is installed])])]) - CPPFLAGS="$CPPFLAGS_TEMP" - LIBS="$LIBS_TEMP" -fi -]) - AC_DEFUN([SECP_VALGRIND_CHECK],[ if test x"$has_valgrind" != x"yes"; then CPPFLAGS_TEMP="$CPPFLAGS" @@ -95,3 +82,19 @@ if test x"$has_valgrind" != x"yes"; then AC_CHECK_HEADER([valgrind/memcheck.h], [has_valgrind=yes; AC_DEFINE(HAVE_VALGRIND,1,[Define this symbol if valgrind is installed])]) fi ]) + +dnl SECP_TRY_APPEND_CFLAGS(flags, VAR) +dnl Append flags to VAR if CC accepts them. 
+AC_DEFUN([SECP_TRY_APPEND_CFLAGS], [ + AC_MSG_CHECKING([if ${CC} supports $1]) + SECP_TRY_APPEND_CFLAGS_saved_CFLAGS="$CFLAGS" + CFLAGS="$1 $CFLAGS" + AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], [flag_works=yes], [flag_works=no]) + AC_MSG_RESULT($flag_works) + CFLAGS="$SECP_TRY_APPEND_CFLAGS_saved_CFLAGS" + if test x"$flag_works" = x"yes"; then + $2="$$2 $1" + fi + unset flag_works + AC_SUBST($2) +]) diff --git a/secp256k1-zkp-sys/depend/secp256k1/ci/cirrus.sh b/secp256k1-zkp-sys/depend/secp256k1/ci/cirrus.sh index 785a522a..c32471ef 100755 --- a/secp256k1-zkp-sys/depend/secp256k1/ci/cirrus.sh +++ b/secp256k1-zkp-sys/depend/secp256k1/ci/cirrus.sh @@ -14,7 +14,7 @@ valgrind --version || true ./configure \ --enable-experimental="$EXPERIMENTAL" \ - --with-test-override-wide-multiply="$WIDEMUL" --with-bignum="$BIGNUM" --with-asm="$ASM" \ + --with-test-override-wide-multiply="$WIDEMUL" --with-asm="$ASM" \ --enable-ecmult-static-precomputation="$STATICPRECOMPUTATION" --with-ecmult-gen-precision="$ECMULTGENPRECISION" \ --enable-module-ecdh="$ECDH" --enable-module-recovery="$RECOVERY" \ --enable-module-ecdsa-s2c="$ECDSA_S2C" \ @@ -27,42 +27,27 @@ valgrind --version || true make # Print information about binaries so that we can see that the architecture is correct -file *tests || true +file *tests* || true file bench_* || true file .libs/* || true -if [ -n "$BUILD" ] -then - make "$BUILD" -fi +# This tells `make check` to wrap test invocations. +export LOG_COMPILER="$WRAPPER_CMD" -if [ "$RUN_VALGRIND" = "yes" ] -then - # the `--error-exitcode` is required to make the test fail if valgrind found errors, otherwise it'll return 0 (https://www.valgrind.org/docs/manual/manual-core.html) - valgrind --error-exitcode=42 ./tests 16 - valgrind --error-exitcode=42 ./exhaustive_tests -fi +# This limits the iterations in the tests and benchmarks. 
+export SECP256K1_TEST_ITERS="$TEST_ITERS" +export SECP256K1_BENCH_ITERS="$BENCH_ITERS" -if [ -n "$QEMU_CMD" ] -then - $QEMU_CMD ./tests 16 - $QEMU_CMD ./exhaustive_tests -fi +make "$BUILD" if [ "$BENCH" = "yes" ] then # Using the local `libtool` because on macOS the system's libtool has nothing to do with GNU libtool EXEC='./libtool --mode=execute' - if [ -n "$QEMU_CMD" ] - then - EXEC="$EXEC $QEMU_CMD" - fi - if [ "$RUN_VALGRIND" = "yes" ] + if [ -n "$WRAPPER_CMD" ] then - EXEC="$EXEC valgrind --error-exitcode=42" + EXEC="$EXEC $WRAPPER_CMD" fi - # This limits the iterations in the benchmarks below to ITER iterations. - export SECP256K1_BENCH_ITERS="$ITERS" { $EXEC ./bench_ecmult $EXEC ./bench_internal diff --git a/secp256k1-zkp-sys/depend/secp256k1/ci/linux-debian.Dockerfile b/secp256k1-zkp-sys/depend/secp256k1/ci/linux-debian.Dockerfile index 201ace4f..2c02ed69 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/ci/linux-debian.Dockerfile +++ b/secp256k1-zkp-sys/depend/secp256k1/ci/linux-debian.Dockerfile @@ -2,12 +2,24 @@ FROM debian:stable RUN dpkg --add-architecture i386 RUN dpkg --add-architecture s390x +RUN dpkg --add-architecture armhf +RUN dpkg --add-architecture arm64 +RUN dpkg --add-architecture ppc64el RUN apt-get update # dkpg-dev: to make pkg-config work in cross-builds +# llvm: for llvm-symbolizer, which is used by clang's UBSan for symbolized stack traces RUN apt-get install --no-install-recommends --no-upgrade -y \ git ca-certificates \ make automake libtool pkg-config dpkg-dev valgrind qemu-user \ - gcc clang libc6-dbg libgmp-dev \ - gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libgmp-dev:i386 \ - gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x + gcc clang llvm libc6-dbg \ + g++ \ + gcc-i686-linux-gnu libc6-dev-i386-cross libc6-dbg:i386 libubsan1:i386 libasan5:i386 \ + gcc-s390x-linux-gnu libc6-dev-s390x-cross libc6-dbg:s390x \ + gcc-arm-linux-gnueabihf libc6-dev-armhf-cross libc6-dbg:armhf \ + gcc-aarch64-linux-gnu 
libc6-dev-arm64-cross libc6-dbg:arm64 \ + gcc-powerpc64le-linux-gnu libc6-dev-ppc64el-cross libc6-dbg:ppc64el \ + wine gcc-mingw-w64-x86-64 + +# Run a dummy command in wine to make it set up configuration +RUN wine64-stable xcopy || true diff --git a/secp256k1-zkp-sys/depend/secp256k1/configure.ac b/secp256k1-zkp-sys/depend/secp256k1/configure.ac index 17e52078..3324e624 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/configure.ac +++ b/secp256k1-zkp-sys/depend/secp256k1/configure.ac @@ -8,10 +8,6 @@ AH_TOP([#define LIBSECP256K1_CONFIG_H]) AH_BOTTOM([#endif /*LIBSECP256K1_CONFIG_H*/]) AM_INIT_AUTOMAKE([foreign subdir-objects]) -# Set -g if CFLAGS are not already set, which matches the default autoconf -# behavior (see PROG_CC in the Autoconf manual) with the exception that we don't -# set -O2 here because we set it in any case (see further down). -: ${CFLAGS="-g"} LT_INIT # Make the compilation flags quiet unless V=1 is used. @@ -42,31 +38,26 @@ AM_PROG_AS case $host_os in *darwin*) if test x$cross_compiling != xyes; then - AC_PATH_PROG([BREW],brew,) - if test x$BREW != x; then + AC_CHECK_PROG([BREW], brew, brew) + if test x$BREW = xbrew; then # These Homebrew packages may be keg-only, meaning that they won't be found # in expected paths because they may conflict with system files. Ask # Homebrew where each one is located, then adjust paths accordingly. 
openssl_prefix=`$BREW --prefix openssl 2>/dev/null` - gmp_prefix=`$BREW --prefix gmp 2>/dev/null` valgrind_prefix=`$BREW --prefix valgrind 2>/dev/null` if test x$openssl_prefix != x; then PKG_CONFIG_PATH="$openssl_prefix/lib/pkgconfig:$PKG_CONFIG_PATH" export PKG_CONFIG_PATH CRYPTO_CPPFLAGS="-I$openssl_prefix/include" fi - if test x$gmp_prefix != x; then - GMP_CPPFLAGS="-I$gmp_prefix/include" - GMP_LIBS="-L$gmp_prefix/lib" - fi if test x$valgrind_prefix != x; then VALGRIND_CPPFLAGS="-I$valgrind_prefix/include" fi else - AC_PATH_PROG([PORT],port,) + AC_CHECK_PROG([PORT], port, port) # If homebrew isn't installed and macports is, add the macports default paths # as a last resort. - if test x$PORT != x; then + if test x$PORT = xport; then CPPFLAGS="$CPPFLAGS -isystem /opt/local/include" LDFLAGS="$LDFLAGS -L/opt/local/lib" fi @@ -75,26 +66,41 @@ case $host_os in ;; esac -CFLAGS="-W $CFLAGS" - -warn_CFLAGS="-std=c89 -pedantic -Wall -Wextra -Wcast-align -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef -Wno-unused-function -Wno-long-long -Wno-overlength-strings" -saved_CFLAGS="$CFLAGS" -CFLAGS="$warn_CFLAGS $CFLAGS" -AC_MSG_CHECKING([if ${CC} supports ${warn_CFLAGS}]) -AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) - -saved_CFLAGS="$CFLAGS" -CFLAGS="-fvisibility=hidden $CFLAGS" -AC_MSG_CHECKING([if ${CC} supports -fvisibility=hidden]) -AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) +# Try if some desirable compiler flags are supported and append them to SECP_CFLAGS. +# +# These are our own flags, so we append them to our own SECP_CFLAGS variable (instead of CFLAGS) as +# recommended in the automake manual (Section "Flag Variables Ordering"). CFLAGS belongs to the user +# and we are not supposed to touch it. 
In the Makefile, we will need to ensure that SECP_CFLAGS
+# is prepended to CFLAGS when invoking the compiler so that the user always has the last word (flag).
+#
+# Another advantage of not touching CFLAGS is that the contents of CFLAGS will be picked up by
+# libtool for compiling helper executables. For example, when compiling for Windows, libtool will
+# generate entire wrapper executables (instead of simple wrapper scripts as on Unix) to ensure
+# proper operation of uninstalled programs linked by libtool against the uninstalled shared library.
+# These executables are compiled from C source file for which our flags may not be appropriate,
+# e.g., -std=c89 flag has led to undesirable warnings in the past.
+#
+# TODO We should analogously not touch CPPFLAGS and LDFLAGS but currently there are no issues.
+AC_DEFUN([SECP_TRY_APPEND_DEFAULT_CFLAGS], [
+  # Try to append -Werror=unknown-warning-option to CFLAGS temporarily. Otherwise clang will
+  # not error out if it gets unknown warning flags and the checks here will always succeed
+  # no matter if clang knows the flag or not.
+  SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS="$CFLAGS"
+  SECP_TRY_APPEND_CFLAGS([-Werror=unknown-warning-option], CFLAGS)
+
+  SECP_TRY_APPEND_CFLAGS([-std=c89 -pedantic -Wno-long-long -Wnested-externs -Wshadow -Wstrict-prototypes -Wundef], $1) # GCC >= 3.0, -Wlong-long is implied by -pedantic.
+  SECP_TRY_APPEND_CFLAGS([-Wno-overlength-strings], $1) # GCC >= 4.2, -Woverlength-strings is implied by -pedantic.
+  SECP_TRY_APPEND_CFLAGS([-Wall], $1) # GCC >= 2.95 and probably many other compilers
+  SECP_TRY_APPEND_CFLAGS([-Wno-unused-function], $1) # GCC >= 3.0, -Wunused-function is implied by -Wall.
+  SECP_TRY_APPEND_CFLAGS([-Wextra], $1) # GCC >= 3.4, this is the newer name of -W, which we don't use because older GCCs will warn about unused functions.
+ SECP_TRY_APPEND_CFLAGS([-Wcast-align], $1) # GCC >= 2.95 + SECP_TRY_APPEND_CFLAGS([-Wcast-align=strict], $1) # GCC >= 8.0 + SECP_TRY_APPEND_CFLAGS([-Wconditional-uninitialized], $1) # Clang >= 3.0 only + SECP_TRY_APPEND_CFLAGS([-fvisibility=hidden], $1) # GCC >= 4.0 + + CFLAGS="$SECP_TRY_APPEND_DEFAULT_CFLAGS_saved_CFLAGS" +]) +SECP_TRY_APPEND_DEFAULT_CFLAGS(SECP_CFLAGS) ### ### Define config arguments @@ -204,9 +210,6 @@ AC_ARG_ENABLE(reduced_surjection_proof_size, # Legal values are int64 (for [u]int64_t), int128 (for [unsigned] __int128), and auto (the default). AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_widemul=auto]) -AC_ARG_WITH([bignum], [AS_HELP_STRING([--with-bignum=gmp|no|auto], -[bignum implementation to use [default=auto]])],[req_bignum=$withval], [req_bignum=auto]) - AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm|no|auto], [assembly optimizations to useĀ (experimental: arm) [default=auto]])],[req_asm=$withval], [req_asm=auto]) @@ -252,10 +255,14 @@ AM_CONDITIONAL([VALGRIND_ENABLED],[test "$enable_valgrind" = "yes"]) if test x"$enable_coverage" = x"yes"; then AC_DEFINE(COVERAGE, 1, [Define this symbol to compile out all VERIFY code]) - CFLAGS="-O0 --coverage $CFLAGS" + SECP_CFLAGS="-O0 --coverage $SECP_CFLAGS" LDFLAGS="--coverage $LDFLAGS" else - CFLAGS="-O2 $CFLAGS" + # Most likely the CFLAGS already contain -O2 because that is autoconf's default. + # We still add it here because passing it twice is not an issue, and handling + # this case would just add unnecessary complexity (see #896). 
+ SECP_CFLAGS="-O2 $SECP_CFLAGS" + SECP_CFLAGS_FOR_BUILD="-O2 $SECP_CFLAGS_FOR_BUILD" fi AC_MSG_CHECKING([for __builtin_popcount]) @@ -297,32 +304,6 @@ else esac fi -if test x"$req_bignum" = x"auto"; then - SECP_GMP_CHECK - if test x"$has_gmp" = x"yes"; then - set_bignum=gmp - fi - - if test x"$set_bignum" = x; then - set_bignum=no - fi -else - set_bignum=$req_bignum - case $set_bignum in - gmp) - SECP_GMP_CHECK - if test x"$has_gmp" != x"yes"; then - AC_MSG_ERROR([gmp bignum explicitly requested but libgmp not available]) - fi - ;; - no) - ;; - *) - AC_MSG_ERROR([invalid bignum implementation selection]) - ;; - esac -fi - # Select assembly optimization use_external_asm=no @@ -360,24 +341,6 @@ auto) ;; esac -# Select bignum implementation -case $set_bignum in -gmp) - AC_DEFINE(HAVE_LIBGMP, 1, [Define this symbol if libgmp is installed]) - AC_DEFINE(USE_NUM_GMP, 1, [Define this symbol to use the gmp implementation for num]) - AC_DEFINE(USE_FIELD_INV_NUM, 1, [Define this symbol to use the num-based field inverse implementation]) - AC_DEFINE(USE_SCALAR_INV_NUM, 1, [Define this symbol to use the num-based scalar inverse implementation]) - ;; -no) - AC_DEFINE(USE_NUM_NONE, 1, [Define this symbol to use no num implementation]) - AC_DEFINE(USE_FIELD_INV_BUILTIN, 1, [Define this symbol to use the native field inverse implementation]) - AC_DEFINE(USE_SCALAR_INV_BUILTIN, 1, [Define this symbol to use the native scalar inverse implementation]) - ;; -*) - AC_MSG_ERROR([invalid bignum implementation]) - ;; -esac - # Set ecmult window size if test x"$req_ecmult_window" = x"auto"; then set_ecmult_window=15 @@ -442,15 +405,13 @@ else enable_openssl_tests=no fi -if test x"$set_bignum" = x"gmp"; then - SECP_LIBS="$SECP_LIBS $GMP_LIBS" - SECP_INCLUDES="$SECP_INCLUDES $GMP_CPPFLAGS" -fi - if test x"$enable_valgrind" = x"yes"; then SECP_INCLUDES="$SECP_INCLUDES $VALGRIND_CPPFLAGS" fi +# Add -Werror and similar flags passed from the outside (for testing, e.g., in CI) 
+SECP_CFLAGS="$SECP_CFLAGS $WERROR_CFLAGS" + # Handle static precomputation (after everything which modifies CFLAGS and friends) if test x"$use_ecmult_static_precomputation" != x"no"; then if test x"$cross_compiling" = x"no"; then @@ -460,8 +421,9 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then fi # If we're not cross-compiling, simply use the same compiler for building the static precompation code. CC_FOR_BUILD="$CC" - CFLAGS_FOR_BUILD="$CFLAGS" CPPFLAGS_FOR_BUILD="$CPPFLAGS" + SECP_CFLAGS_FOR_BUILD="$SECP_CFLAGS" + CFLAGS_FOR_BUILD="$CFLAGS" LDFLAGS_FOR_BUILD="$LDFLAGS" else AX_PROG_CC_FOR_BUILD @@ -471,22 +433,14 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then cross_compiling=no SAVE_CC="$CC" CC="$CC_FOR_BUILD" - SAVE_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS_FOR_BUILD" SAVE_CPPFLAGS="$CPPFLAGS" CPPFLAGS="$CPPFLAGS_FOR_BUILD" + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS_FOR_BUILD" SAVE_LDFLAGS="$LDFLAGS" LDFLAGS="$LDFLAGS_FOR_BUILD" - warn_CFLAGS_FOR_BUILD="-Wall -Wextra -Wno-unused-function" - saved_CFLAGS="$CFLAGS" - CFLAGS="$warn_CFLAGS_FOR_BUILD $CFLAGS" - AC_MSG_CHECKING([if native ${CC_FOR_BUILD} supports ${warn_CFLAGS_FOR_BUILD}]) - AC_COMPILE_IFELSE([AC_LANG_SOURCE([[char foo;]])], - [ AC_MSG_RESULT([yes]) ], - [ AC_MSG_RESULT([no]) - CFLAGS="$saved_CFLAGS" - ]) + SECP_TRY_APPEND_DEFAULT_CFLAGS(SECP_CFLAGS_FOR_BUILD) AC_MSG_CHECKING([for working native compiler: ${CC_FOR_BUILD}]) AC_RUN_IFELSE( @@ -494,19 +448,17 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then [working_native_cc=yes], [working_native_cc=no],[:]) - CFLAGS_FOR_BUILD="$CFLAGS" - # Restore the environment cross_compiling=$save_cross_compiling CC="$SAVE_CC" - CFLAGS="$SAVE_CFLAGS" CPPFLAGS="$SAVE_CPPFLAGS" + CFLAGS="$SAVE_CFLAGS" LDFLAGS="$SAVE_LDFLAGS" if test x"$working_native_cc" = x"no"; then AC_MSG_RESULT([no]) set_precomp=no - m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.]) + 
m4_define([please_set_for_build], [Please set CC_FOR_BUILD, CPPFLAGS_FOR_BUILD, CFLAGS_FOR_BUILD, and/or LDFLAGS_FOR_BUILD.]) if test x"$use_ecmult_static_precomputation" = x"yes"; then AC_MSG_ERROR([native compiler ${CC_FOR_BUILD} does not produce working binaries. please_set_for_build]) else @@ -519,8 +471,9 @@ if test x"$use_ecmult_static_precomputation" != x"no"; then fi AC_SUBST(CC_FOR_BUILD) - AC_SUBST(CFLAGS_FOR_BUILD) AC_SUBST(CPPFLAGS_FOR_BUILD) + AC_SUBST(SECP_CFLAGS_FOR_BUILD) + AC_SUBST(CFLAGS_FOR_BUILD) AC_SUBST(LDFLAGS_FOR_BUILD) else set_precomp=no @@ -540,6 +493,7 @@ fi if test x"$enable_module_musig" = x"yes"; then AC_DEFINE(ENABLE_MODULE_MUSIG, 1, [Define this symbol to enable the MuSig module]) + enable_module_schnorrsig=yes fi if test x"$enable_module_recovery" = x"yes"; then @@ -561,7 +515,8 @@ fi if test x"$enable_module_surjectionproof" = x"yes"; then AC_DEFINE(ENABLE_MODULE_SURJECTIONPROOF, 1, [Define this symbol to enable the surjection proof module]) fi - +# Test if extrakeys is set _after_ the MuSig module to allow the MuSig +# module to set enable_module_schnorrsig=yes if test x"$enable_module_schnorrsig" = x"yes"; then AC_DEFINE(ENABLE_MODULE_SCHNORRSIG, 1, [Define this symbol to enable the schnorrsig module]) enable_module_extrakeys=yes @@ -672,6 +627,7 @@ AC_SUBST(SECP_INCLUDES) AC_SUBST(SECP_LIBS) AC_SUBST(SECP_TEST_LIBS) AC_SUBST(SECP_TEST_INCLUDES) +AC_SUBST(SECP_CFLAGS) AM_CONDITIONAL([ENABLE_COVERAGE], [test x"$enable_coverage" = x"yes"]) AM_CONDITIONAL([USE_TESTS], [test x"$use_tests" != x"no"]) AM_CONDITIONAL([USE_EXHAUSTIVE_TESTS], [test x"$use_exhaustive_tests" != x"no"]) @@ -711,11 +667,11 @@ echo " module ecdh = $enable_module_ecdh" echo " module recovery = $enable_module_recovery" echo " module extrakeys = $enable_module_extrakeys" echo " module schnorrsig = $enable_module_schnorrsig" +echo " module musig = $enable_module_musig" echo " module ecdsa-s2c = $enable_module_ecdsa_s2c" echo " module ecdsa-adaptor = 
$enable_module_ecdsa_adaptor" echo echo " asm = $set_asm" -echo " bignum = $set_bignum" echo " ecmult window size = $set_ecmult_window" echo " ecmult gen prec. bits = $set_ecmult_gen_precision" # Hide test-only options unless they're used. @@ -725,13 +681,15 @@ fi echo echo " valgrind = $enable_valgrind" echo " CC = $CC" -echo " CFLAGS = $CFLAGS" echo " CPPFLAGS = $CPPFLAGS" +echo " SECP_CFLAGS = $SECP_CFLAGS" +echo " CFLAGS = $CFLAGS" echo " LDFLAGS = $LDFLAGS" echo if test x"$set_precomp" = x"yes"; then echo " CC_FOR_BUILD = $CC_FOR_BUILD" -echo " CFLAGS_FOR_BUILD = $CFLAGS_FOR_BUILD" echo " CPPFLAGS_FOR_BUILD = $CPPFLAGS_FOR_BUILD" +echo " SECP_CFLAGS_FOR_BUILD = $SECP_CFLAGS_FOR_BUILD" +echo " CFLAGS_FOR_BUILD = $CFLAGS_FOR_BUILD" echo " LDFLAGS_FOR_BUILD = $LDFLAGS_FOR_BUILD" fi diff --git a/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_parsing.c b/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_parsing.c index c1f1026c..fd1c11ec 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_parsing.c +++ b/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_parsing.c @@ -5,7 +5,6 @@ ***********************************************************************/ #include -#include #include "lax_der_parsing.h" @@ -121,7 +120,7 @@ int rustsecp256k1zkp_v0_4_0_ecdsa_signature_parse_der_lax(const rustsecp256k1zkp /* Copy R value */ if (rlen > 32) { overflow = 1; - } else { + } else if (rlen) { memcpy(tmpsig + 32 - rlen, input + rpos, rlen); } @@ -133,7 +132,7 @@ int rustsecp256k1zkp_v0_4_0_ecdsa_signature_parse_der_lax(const rustsecp256k1zkp /* Copy S value */ if (slen > 32) { overflow = 1; - } else { + } else if (slen) { memcpy(tmpsig + 64 - slen, input + spos, slen); } diff --git a/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_parsing.h b/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_parsing.h index 8a2cdd66..417b0105 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_parsing.h +++ 
b/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_parsing.h @@ -51,7 +51,13 @@ #ifndef SECP256K1_CONTRIB_LAX_DER_PARSING_H #define SECP256K1_CONTRIB_LAX_DER_PARSING_H +/* #include secp256k1.h only when it hasn't been included yet. + This enables this file to be #included directly in other project + files (such as tests.c) without the need to set an explicit -I flag, + which would be necessary to locate secp256k1.h. */ +#ifndef SECP256K1_H #include +#endif #ifdef __cplusplus extern "C" { diff --git a/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c b/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c index 63c59ebf..6edef66c 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c +++ b/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.c @@ -5,7 +5,6 @@ ***********************************************************************/ #include -#include #include "lax_der_privatekey_parsing.h" @@ -45,7 +44,7 @@ int ec_privkey_import_der(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned c if (end < privkey+2 || privkey[0] != 0x04 || privkey[1] > 0x20 || end < privkey+2+privkey[1]) { return 0; } - memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]); + if (privkey[1]) memcpy(out32 + 32 - privkey[1], privkey + 2, privkey[1]); if (!rustsecp256k1zkp_v0_4_0_ec_seckey_verify(ctx, out32)) { memset(out32, 0, 32); return 0; diff --git a/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h b/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h index 7e484892..ef9afce2 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h +++ b/secp256k1-zkp-sys/depend/secp256k1/contrib/lax_der_privatekey_parsing.h @@ -28,7 +28,13 @@ #ifndef SECP256K1_CONTRIB_BER_PRIVATEKEY_H #define SECP256K1_CONTRIB_BER_PRIVATEKEY_H +/* #include secp256k1.h only when it hasn't been included yet. 
+ This enables this file to be #included directly in other project + files (such as tests.c) without the need to set an explicit -I flag, + which would be necessary to locate secp256k1.h. */ +#ifndef SECP256K1_H #include +#endif #ifdef __cplusplus extern "C" { diff --git a/secp256k1-zkp-sys/depend/secp256k1/contrib/sync-upstream.sh b/secp256k1-zkp-sys/depend/secp256k1/contrib/sync-upstream.sh index c9f47d2e..55dde4e1 100755 --- a/secp256k1-zkp-sys/depend/secp256k1/contrib/sync-upstream.sh +++ b/secp256k1-zkp-sys/depend/secp256k1/contrib/sync-upstream.sh @@ -22,11 +22,11 @@ if [ "$#" -lt 1 ]; then fi REMOTE=upstream -REMOTE_BRANCH=$REMOTE/master +REMOTE_BRANCH="$REMOTE/master" # Makes sure you have a remote "upstream" that is up-to-date setup() { ret=0 - git fetch $REMOTE &> /dev/null || ret=$? + git fetch "$REMOTE" &> /dev/null || ret="$?" if [ ${ret} == 0 ]; then return fi @@ -36,18 +36,18 @@ setup() { [Yy]* ) ;; * ) exit 1;; esac - git remote add $REMOTE git@github.com:bitcoin-core/secp256k1.git &> /dev/null - git fetch $REMOTE &> /dev/null + git remote add "$REMOTE" git@github.com:bitcoin-core/secp256k1.git &> /dev/null + git fetch "$REMOTE" &> /dev/null } range() { - RANGESTART_COMMIT=$(git merge-base $REMOTE_BRANCH master) - RANGEEND_COMMIT=$(git rev-parse $REMOTE_BRANCH) + RANGESTART_COMMIT=$(git merge-base "$REMOTE_BRANCH" master) + RANGEEND_COMMIT=$(git rev-parse "$REMOTE_BRANCH") if [ "$#" = 1 ]; then RANGEEND_COMMIT=$1 fi - COMMITS=$(git --no-pager log --oneline "$REMOTE_BRANCH" --merges "$RANGESTART_COMMIT".."$RANGEEND_COMMIT") + COMMITS=$(git --no-pager log --oneline --merges "$RANGESTART_COMMIT".."$RANGEEND_COMMIT") COMMITS=$(echo "$COMMITS" | tac | awk '{ print $1 }' ORS=' ') echo "Merging $COMMITS. 
Continue with y" read -r yn @@ -81,9 +81,9 @@ TITLE="Upstream PRs" BODY="" for COMMIT in $COMMITS do - PRNUM=$(git log -1 "$COMMIT" --pretty=format:%s | sed s/'Merge #\([0-9]*\).*'/'\1'/) + PRNUM=$(git log -1 "$COMMIT" --pretty=format:%s | sed s/'Merge \(bitcoin-core\/secp256k1\)\?#\([0-9]*\).*'/'\2'/) TITLE="$TITLE $PRNUM," - BODY=$(printf "%s\n%s" "$BODY" "$(git log -1 "$COMMIT" --pretty=format:%s | sed s/'Merge #\([0-9]*\)'/'[bitcoin-core\/secp256k1#\1]'/)") + BODY=$(printf "%s\n%s" "$BODY" "$(git log -1 "$COMMIT" --pretty=format:%s | sed s/'Merge \(bitcoin-core\/secp256k1\)\?#\([0-9]*\)'/'[bitcoin-core\/secp256k1#\2]'/)") done # Remove trailing "," TITLE=${TITLE%?} @@ -101,7 +101,7 @@ git pull git checkout -b temp-merge-"$PRNUM" BASEDIR=$(dirname "$0") -FNAME=$BASEDIR/gh-pr-create.sh +FNAME="$BASEDIR/gh-pr-create.sh" cat < "$FNAME" #!/bin/sh gh pr create -t "$TITLE" -b "$BODY" --web diff --git a/secp256k1-zkp-sys/depend/secp256k1/doc/safegcd_implementation.md b/secp256k1-zkp-sys/depend/secp256k1/doc/safegcd_implementation.md new file mode 100644 index 00000000..3ae556f9 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/doc/safegcd_implementation.md @@ -0,0 +1,765 @@ +# The safegcd implementation in libsecp256k1 explained + +This document explains the modular inverse implementation in the `src/modinv*.h` files. It is based +on the paper +["Fast constant-time gcd computation and modular inversion"](https://gcd.cr.yp.to/papers.html#safegcd) +by Daniel J. Bernstein and Bo-Yin Yang. The references below are for the Date: 2019.04.13 version. + +The actual implementation is in C of course, but for demonstration purposes Python3 is used here. +Most implementation aspects and optimizations are explained, except those that depend on the specific +number representation used in the C code. + +## 1. 
Computing the Greatest Common Divisor (GCD) using divsteps + +The algorithm from the paper (section 11), at a very high level, is this: + +```python +def gcd(f, g): + """Compute the GCD of an odd integer f and another integer g.""" + assert f & 1 # require f to be odd + delta = 1 # additional state variable + while g != 0: + assert f & 1 # f will be odd in every iteration + if delta > 0 and g & 1: + delta, f, g = 1 - delta, g, (g - f) // 2 + elif g & 1: + delta, f, g = 1 + delta, f, (g + f) // 2 + else: + delta, f, g = 1 + delta, f, (g ) // 2 + return abs(f) +``` + +It computes the greatest common divisor of an odd integer *f* and any integer *g*. Its inner loop +keeps rewriting the variables *f* and *g* alongside a state variable *δ* that starts at *1*, until +*g=0* is reached. At that point, *|f|* gives the GCD. Each of the transitions in the loop is called a +"division step" (referred to as divstep in what follows). + +For example, *gcd(21, 14)* would be computed as: +- Start with *δ=1 f=21 g=14* +- Take the third branch: *δ=2 f=21 g=7* +- Take the first branch: *δ=-1 f=7 g=-7* +- Take the second branch: *δ=0 f=7 g=0* +- The answer *|f| = 7*. + +Why it works: +- Divsteps can be decomposed into two steps (see paragraph 8.2 in the paper): + - (a) If *g* is odd, replace *(f,g)* with *(g,g-f)* or (f,g+f), resulting in an even *g*. + - (b) Replace *(f,g)* with *(f,g/2)* (where *g* is guaranteed to be even). +- Neither of those two operations change the GCD: + - For (a), assume *gcd(f,g)=c*, then it must be the case that *f=a c* and *g=b c* for some integers *a* + and *b*. As *(g,g-f)=(b c,(b-a)c)* and *(f,f+g)=(a c,(a+b)c)*, the result clearly still has + common factor *c*. Reasoning in the other direction shows that no common factor can be added by + doing so either. + - For (b), we know that *f* is odd, so *gcd(f,g)* clearly has no factor *2*, and we can remove + it from *g*. +- The algorithm will eventually converge to *g=0*. 
This is proven in the paper (see theorem G.3). +It follows that eventually we find a final value *f'* for which *gcd(f,g) = gcd(f',0)*. As the + gcd of *f'* and *0* is *|f'|* by definition, that is our answer. + +Compared to more [traditional GCD algorithms](https://en.wikipedia.org/wiki/Euclidean_algorithm), this one has the property of only ever looking at +the low-order bits of the variables to decide the next steps, and being easy to make +constant-time (in more low-level languages than Python). The *δ* parameter is necessary to +guide the algorithm towards shrinking the numbers' magnitudes without explicitly needing to look +at high order bits. + +Properties that will become important later: +- Performing more divsteps than needed is not a problem, as *f* does not change anymore after *g=0*. +- Only even numbers are divided by *2*. This means that when reasoning about it algebraically we + do not need to worry about rounding. +- At every point during the algorithm's execution the next *N* steps only depend on the bottom *N* + bits of *f* and *g*, and on *δ*. + + +## 2. From GCDs to modular inverses + +We want an algorithm to compute the inverse *a* of *x* modulo *M*, i.e. the number *a* such that *a x=1 +mod M*. This inverse only exists if the GCD of *x* and *M* is *1*, but that is always the case if *M* is +prime and *0 < x < M*. In what follows, assume that the modular inverse exists. +It turns out this inverse can be computed as a side effect of computing the GCD by keeping track +of how the internal variables can be written as linear combinations of the inputs at every step +(see the [extended Euclidean algorithm](https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm)). +Since the GCD is *1*, such an algorithm will compute numbers *a* and *b* such that *a x + b M = 1*. +Taking that expression *mod M* gives *a x mod M = 1*, and we see that *a* is the modular inverse of *x +mod M*.
+ +A similar approach can be used to calculate modular inverses using the divsteps-based GCD +algorithm shown above, if the modulus *M* is odd. To do so, compute *gcd(f=M,g=x)*, while keeping +track of extra variables *d* and *e*, for which at every step *d = f/x (mod M)* and *e = g/x (mod M)*. +*f/x* here means the number which multiplied with *x* gives *f mod M*. As *f* and *g* are initialized to *M* +and *x* respectively, *d* and *e* just start off being *0* (*M/x mod M = 0/x mod M = 0*) and *1* (*x/x mod M += 1*). + +```python +def div2(M, x): + """Helper routine to compute x/2 mod M (where M is odd).""" + assert M & 1 + if x & 1: # If x is odd, make it even by adding M. + x += M + # x must be even now, so a clean division by 2 is possible. + return x // 2 + +def modinv(M, x): + """Compute the inverse of x mod M (given that it exists, and M is odd).""" + assert M & 1 + delta, f, g, d, e = 1, M, x, 0, 1 + while g != 0: + # Note that while division by two for f and g is only ever done on even inputs, this is + # not true for d and e, so we need the div2 helper function. + if delta > 0 and g & 1: + delta, f, g, d, e = 1 - delta, g, (g - f) // 2, e, div2(M, e - d) + elif g & 1: + delta, f, g, d, e = 1 + delta, f, (g + f) // 2, d, div2(M, e + d) + else: + delta, f, g, d, e = 1 + delta, f, (g ) // 2, d, div2(M, e ) + # Verify that the invariants d=f/x mod M, e=g/x mod M are maintained. + assert f % M == (d * x) % M + assert g % M == (e * x) % M + assert f == 1 or f == -1 # |f| is the GCD, it must be 1 + # Because of invariant d = f/x (mod M), 1/x = d/f (mod M). As |f|=1, d/f = d*f. + return (d * f) % M +``` + +Also note that this approach to track *d* and *e* throughout the computation to determine the inverse +is different from the paper. There (see paragraph 12.1 in the paper) a transition matrix for the +entire computation is determined (see section 3 below) and the inverse is computed from that. 
+The approach here avoids the need for 2x2 matrix multiplications of various sizes, and appears to +be faster at the level of optimization we're able to do in C. + + +## 3. Batching multiple divsteps + +Every divstep can be expressed as a matrix multiplication, applying a transition matrix *(1/2 t)* +to both vectors *[f, g]* and *[d, e]* (see paragraph 8.1 in the paper): + +``` + t = [ u, v ] + [ q, r ] + + [ out_f ] = (1/2 * t) * [ in_f ] + [ out_g ] = [ in_g ] + + [ out_d ] = (1/2 * t) * [ in_d ] (mod M) + [ out_e ] [ in_e ] +``` + +where *(u, v, q, r)* is *(0, 2, -1, 1)*, *(2, 0, 1, 1)*, or *(2, 0, 0, 1)*, depending on which branch is +taken. As above, the resulting *f* and *g* are always integers. + +Performing multiple divsteps corresponds to a multiplication with the product of all the +individual divsteps' transition matrices. As each transition matrix consists of integers +divided by *2*, the product of these matrices will consist of integers divided by *2N* (see also +theorem 9.2 in the paper). These divisions are expensive when updating *d* and *e*, so we delay +them: we compute the integer coefficients of the combined transition matrix scaled by *2N*, and +do one division by *2N* as a final step: + +```python +def divsteps_n_matrix(delta, f, g): + """Compute delta and transition matrix t after N divsteps (multiplied by 2^N).""" + u, v, q, r = 1, 0, 0, 1 # start with identity matrix + for _ in range(N): + if delta > 0 and g & 1: + delta, f, g, u, v, q, r = 1 - delta, g, (g - f) // 2, 2*q, 2*r, q-u, r-v + elif g & 1: + delta, f, g, u, v, q, r = 1 + delta, f, (g + f) // 2, 2*u, 2*v, q+u, r+v + else: + delta, f, g, u, v, q, r = 1 + delta, f, (g ) // 2, 2*u, 2*v, q , r + return delta, (u, v, q, r) +``` + +As the branches in the divsteps are completely determined by the bottom *N* bits of *f* and *g*, this +function to compute the transition matrix only needs to see those bottom bits. 
Furthermore all +intermediate results and outputs fit in *(N+1)*-bit numbers (unsigned for *f* and *g*; signed for *u*, *v*, +*q*, and *r*) (see also paragraph 8.3 in the paper). This means that an implementation using 64-bit +integers could set *N=62* and compute the full transition matrix for 62 steps at once without any +big integer arithmetic at all. This is the reason why this algorithm is efficient: it only needs +to update the full-size *f*, *g*, *d*, and *e* numbers once every *N* steps. + +We still need functions to compute: + +``` + [ out_f ] = (1/2^N * [ u, v ]) * [ in_f ] + [ out_g ] ( [ q, r ]) [ in_g ] + + [ out_d ] = (1/2^N * [ u, v ]) * [ in_d ] (mod M) + [ out_e ] ( [ q, r ]) [ in_e ] +``` + +Because the divsteps transformation only ever divides even numbers by two, the result of *t [f,g]* is always even. When *t* is a composition of *N* divsteps, it follows that the resulting *f* +and *g* will be multiple of *2N*, and division by *2N* is simply shifting them down: + +```python +def update_fg(f, g, t): + """Multiply matrix t/2^N with [f, g].""" + u, v, q, r = t + cf, cg = u*f + v*g, q*f + r*g + # (t / 2^N) should cleanly apply to [f,g] so the result of t*[f,g] should have N zero + # bottom bits. + assert cf % 2**N == 0 + assert cg % 2**N == 0 + return cf >> N, cg >> N +``` + +The same is not true for *d* and *e*, and we need an equivalent of the `div2` function for division by *2N mod M*. +This is easy if we have precomputed *1/M mod 2N* (which always exists for odd *M*): + +```python +def div2n(M, Mi, x): + """Compute x/2^N mod M, given Mi = 1/M mod 2^N.""" + assert (M * Mi) % 2**N == 1 + # Find a factor m such that m*M has the same bottom N bits as x. We want: + # (m * M) mod 2^N = x mod 2^N + # <=> m mod 2^N = (x / M) mod 2^N + # <=> m mod 2^N = (x * Mi) mod 2^N + m = (Mi * x) % 2**N + # Subtract that multiple from x, cancelling its bottom N bits. + x -= m * M + # Now a clean division by 2^N is possible. 
+ assert x % 2**N == 0 + return (x >> N) % M + +def update_de(d, e, t, M, Mi): + """Multiply matrix t/2^N with [d, e], modulo M.""" + u, v, q, r = t + cd, ce = u*d + v*e, q*d + r*e + return div2n(M, Mi, cd), div2n(M, Mi, ce) +``` + +With all of those, we can write a version of `modinv` that performs *N* divsteps at once: + +```python3 +def modinv(M, Mi, x): + """Compute the modular inverse of x mod M, given Mi=1/M mod 2^N.""" + assert M & 1 + delta, f, g, d, e = 1, M, x, 0, 1 + while g != 0: + # Compute the delta and transition matrix t for the next N divsteps (this only needs + # (N+1)-bit signed integer arithmetic). + delta, t = divsteps_n_matrix(delta, f % 2**N, g % 2**N) + # Apply the transition matrix t to [f, g]: + f, g = update_fg(f, g, t) + # Apply the transition matrix t to [d, e]: + d, e = update_de(d, e, t, M, Mi) + return (d * f) % M +``` + +This means that in practice we'll always perform a multiple of *N* divsteps. This is not a problem +because once *g=0*, further divsteps do not affect *f*, *g*, *d*, or *e* anymore (only *δ* keeps +increasing). For variable time code such excess iterations will be mostly optimized away in later +sections. + + +## 4. Avoiding modulus operations + +So far, there are two places where we compute a remainder of big numbers modulo *M*: at the end of +`div2n` in every `update_de`, and at the very end of `modinv` after potentially negating *d* due to the +sign of *f*. These are relatively expensive operations when done generically. + +To deal with the modulus operation in `div2n`, we simply stop requiring *d* and *e* to be in range +*[0,M)* all the time. Let's start by inlining `div2n` into `update_de`, and dropping the modulus +operation at the end: + +```python +def update_de(d, e, t, M, Mi): + """Multiply matrix t/2^N with [d, e] mod M, given Mi=1/M mod 2^N.""" + u, v, q, r = t + cd, ce = u*d + v*e, q*d + r*e + # Cancel out bottom N bits of cd and ce. 
+ md = -((Mi * cd) % 2**N) + me = -((Mi * ce) % 2**N) + cd += md * M + ce += me * M + # And cleanly divide by 2**N. + return cd >> N, ce >> N +``` + +Let's look at bounds on the ranges of these numbers. It can be shown that *|u|+|v|* and *|q|+|r|* +never exceed *2N* (see paragraph 8.3 in the paper), and thus a multiplication with *t* will have +outputs whose absolute values are at most *2N* times the maximum absolute input value. In case the +inputs *d* and *e* are in *(-M,M)*, which is certainly true for the initial values *d=0* and *e=1* assuming +*M > 1*, the multiplication results in numbers in range *(-2NM,2NM)*. Subtracting less than *2N* +times *M* to cancel out *N* bits brings that up to *(-2N+1M,2NM)*, and +dividing by *2N* at the end takes it to *(-2M,M)*. Another application of `update_de` would take that +to *(-3M,2M)*, and so forth. This progressive expansion of the variables' ranges can be +counteracted by incrementing *d* and *e* by *M* whenever they're negative: + +```python + ... + if d < 0: + d += M + if e < 0: + e += M + cd, ce = u*d + v*e, q*d + r*e + # Cancel out bottom N bits of cd and ce. + ... +``` + +With inputs in *(-2M,M)*, they will first be shifted into range *(-M,M)*, which means that the +output will again be in *(-2M,M)*, and this remains the case regardless of how many `update_de` +invocations there are. In what follows, we will try to make this more efficient. + +Note that increasing *d* by *M* is equal to incrementing *cd* by *u M* and *ce* by *q M*. Similarly, +increasing *e* by *M* is equal to incrementing *cd* by *v M* and *ce* by *r M*. So we could instead write: + +```python + ... + cd, ce = u*d + v*e, q*d + r*e + # Perform the equivalent of incrementing d, e by M when they're negative. + if d < 0: + cd += u*M + ce += q*M + if e < 0: + cd += v*M + ce += r*M + # Cancel out bottom N bits of cd and ce. + md = -((Mi * cd) % 2**N) + me = -((Mi * ce) % 2**N) + cd += md * M + ce += me * M + ... 
+``` + +Now note that we have two steps of corrections to *cd* and *ce* that add multiples of *M*: this +increment, and the decrement that cancels out bottom bits. The second one depends on the first +one, but they can still be efficiently combined by only computing the bottom bits of *cd* and *ce* +at first, and using that to compute the final *md*, *me* values: + +```python +def update_de(d, e, t, M, Mi): + """Multiply matrix t/2^N with [d, e], modulo M.""" + u, v, q, r = t + md, me = 0, 0 + # Compute what multiples of M to add to cd and ce. + if d < 0: + md += u + me += q + if e < 0: + md += v + me += r + # Compute bottom N bits of t*[d,e] + M*[md,me]. + cd, ce = (u*d + v*e + md*M) % 2**N, (q*d + r*e + me*M) % 2**N + # Correct md and me such that the bottom N bits of t*[d,e] + M*[md,me] are zero. + md -= (Mi * cd) % 2**N + me -= (Mi * ce) % 2**N + # Do the full computation. + cd, ce = u*d + v*e + md*M, q*d + r*e + me*M + # And cleanly divide by 2**N. + return cd >> N, ce >> N +``` + +One last optimization: we can avoid the *md M* and *me M* multiplications in the bottom bits of *cd* +and *ce* by moving them to the *md* and *me* correction: + +```python + ... + # Compute bottom N bits of t*[d,e]. + cd, ce = (u*d + v*e) % 2**N, (q*d + r*e) % 2**N + # Correct md and me such that the bottom N bits of t*[d,e]+M*[md,me] are zero. + # Note that this is not the same as {md = (-Mi * cd) % 2**N} etc. That would also result in N + # zero bottom bits, but isn't guaranteed to be a reduction of [0,2^N) compared to the + # previous md and me values, and thus would violate our bounds analysis. + md -= (Mi*cd + md) % 2**N + me -= (Mi*ce + me) % 2**N + ... +``` + +The resulting function takes *d* and *e* in range *(-2M,M)* as inputs, and outputs values in the same +range. That also means that the *d* value at the end of `modinv` will be in that range, while we want +a result in *[0,M)*. To do that, we need a normalization function. 
It's easy to integrate the +conditional negation of *d* (based on the sign of *f*) into it as well: + +```python +def normalize(sign, v, M): + """Compute sign*v mod M, where v is in range (-2*M,M); output in [0,M).""" + assert sign == 1 or sign == -1 + # v in (-2*M,M) + if v < 0: + v += M + # v in (-M,M). Now multiply v with sign (which can only be 1 or -1). + if sign == -1: + v = -v + # v in (-M,M) + if v < 0: + v += M + # v in [0,M) + return v +``` + +And calling it in `modinv` is simply: + +```python + ... + return normalize(f, d, M) +``` + + +## 5. Constant-time operation + +The primary selling point of the algorithm is fast constant-time operation. What code flow still +depends on the input data so far? + +- the number of iterations of the while *g ≠ 0* loop in `modinv` +- the branches inside `divsteps_n_matrix` +- the sign checks in `update_de` +- the sign checks in `normalize` + +To make the while loop in `modinv` constant time it can be replaced with a constant number of +iterations. The paper proves (Theorem 11.2) that *741* divsteps are sufficient for any *256*-bit +inputs, and [safegcd-bounds](https://github.com/sipa/safegcd-bounds) shows that the slightly better bound *724* is +sufficient even. Given that every loop iteration performs *N* divsteps, it will run a total of +*⌈724/N⌉* times. + +To deal with the branches in `divsteps_n_matrix` we will replace them with constant-time bitwise +operations (and hope the C compiler isn't smart enough to turn them back into branches; see +`valgrind_ctime_test.c` for automated tests that this isn't the case). To do so, observe that a +divstep can be written instead as (compare to the inner loop of `gcd` in section 1). 
+ +```python + x = -f if delta > 0 else f # set x equal to (input) -f or f + if g & 1: + g += x # set g to (input) g-f or g+f + if delta > 0: + delta = -delta + f += g # set f to (input) g (note that g was set to g-f before) + delta += 1 + g >>= 1 +``` + +To convert the above to bitwise operations, we rely on a trick to negate conditionally: per the +definition of negative numbers in two's complement, (*-v == ~v + 1*) holds for every number *v*. As +*-1* in two's complement is all *1* bits, bitflipping can be expressed as xor with *-1*. It follows +that *-v == (v ^ -1) - (-1)*. Thus, if we have a variable *c* that takes on values *0* or *-1*, then +*(v ^ c) - c* is *v* if *c=0* and *-v* if *c=-1*. + +Using this we can write: + +```python + x = -f if delta > 0 else f +``` + +in constant-time form as: + +```python + c1 = (-delta) >> 63 + # Conditionally negate f based on c1: + x = (f ^ c1) - c1 +``` + +To use that trick, we need a helper mask variable *c1* that resolves the condition *δ>0* to *-1* +(if true) or *0* (if false). We compute *c1* using right shifting, which is equivalent to dividing by +the specified power of *2* and rounding down (in Python, and also in C under the assumption of a typical two's complement system; see +`assumptions.h` for tests that this is the case). Right shifting by *63* thus maps all +numbers in range *[-263,0)* to *-1*, and numbers in range *[0,263)* to *0*. + +Using the facts that *x&0=0* and *x&(-1)=x* (on two's complement systems again), we can write: + +```python + if g & 1: + g += x +``` + +as: + +```python + # Compute c2=0 if g is even and c2=-1 if g is odd. + c2 = -(g & 1) + # This masks out x if g is even, and leaves x be if g is odd. + g += x & c2 +``` + +Using the conditional negation trick again we can write: + +```python + if g & 1: + if delta > 0: + delta = -delta +``` + +as: + +```python + # Compute c3=-1 if g is odd and delta>0, and 0 otherwise. 
+ c3 = c1 & c2 + # Conditionally negate delta based on c3: + delta = (delta ^ c3) - c3 +``` + +Finally: + +```python + if g & 1: + if delta > 0: + f += g +``` + +becomes: + +```python + f += g & c3 +``` + +It turns out that this can be implemented more efficiently by applying the substitution +*η=-δ*. In this representation, negating *δ* corresponds to negating *η*, and incrementing +*δ* corresponds to decrementing *η*. This allows us to remove the negation in the *c1* +computation: + +```python + # Compute a mask c1 for eta < 0, and compute the conditional negation x of f: + c1 = eta >> 63 + x = (f ^ c1) - c1 + # Compute a mask c2 for odd g, and conditionally add x to g: + c2 = -(g & 1) + g += x & c2 + # Compute a mask c for (eta < 0) and odd (input) g, and use it to conditionally negate eta, + # and add g to f: + c3 = c1 & c2 + eta = (eta ^ c3) - c3 + f += g & c3 + # Incrementing delta corresponds to decrementing eta. + eta -= 1 + g >>= 1 +``` + +A variant of divsteps with better worst-case performance can be used instead: starting *δ* at +*1/2* instead of *1*. This reduces the worst case number of iterations to *590* for *256*-bit inputs +(which can be shown using convex hull analysis). In this case, the substitution *ζ=-(δ+1/2)* +is used instead to keep the variable integral. Incrementing *δ* by *1* still translates to +decrementing *ζ* by *1*, but negating *δ* now corresponds to going from *ζ* to *-(ζ+1)*, or +*~ζ*. Doing that conditionally based on *c3* is simply: + +```python + ... + c3 = c1 & c2 + zeta ^= c3 + ... +``` + +By replacing the loop in `divsteps_n_matrix` with a variant of the divstep code above (extended to +also apply all *f* operations to *u*, *v* and all *g* operations to *q*, *r*), a constant-time version of +`divsteps_n_matrix` is obtained. The full code will be in section 7. + +These bit fiddling tricks can also be used to make the conditional negations and additions in +`update_de` and `normalize` constant-time. + + +## 6. 
Variable-time optimizations + +In section 5, we modified the `divsteps_n_matrix` function (and a few others) to be constant time. +Constant time operations are only necessary when computing modular inverses of secret data. In +other cases, it slows down calculations unnecessarily. In this section, we will construct a +faster non-constant time `divsteps_n_matrix` function. + +To do so, first consider yet another way of writing the inner loop of divstep operations in +`gcd` from section 1. This decomposition is also explained in the paper in section 8.2. We use +the original version with initial *δ=1* and *η=-δ* here. + +```python +for _ in range(N): + if g & 1 and eta < 0: + eta, f, g = -eta, g, -f + if g & 1: + g += f + eta -= 1 + g >>= 1 +``` + +Whenever *g* is even, the loop only shifts *g* down and decreases *η*. When *g* ends in multiple zero +bits, these iterations can be consolidated into one step. This requires counting the bottom zero +bits efficiently, which is possible on most platforms; it is abstracted here as the function +`count_trailing_zeros`. + +```python +def count_trailing_zeros(v): + """For a non-zero value v, find z such that v=(d<>= zeros + i -= zeros + if i == 0: + break + # We know g is odd now + if eta < 0: + eta, f, g = -eta, g, -f + g += f + # g is even now, and the eta decrement and g shift will happen in the next loop. +``` + +We can now remove multiple bottom *0* bits from *g* at once, but still need a full iteration whenever +there is a bottom *1* bit. In what follows, we will get rid of multiple *1* bits simultaneously as +well. + +Observe that as long as *η ≥ 0*, the loop does not modify *f*. Instead, it cancels out bottom +bits of *g* and shifts them out, and decreases *η* and *i* accordingly - interrupting only when *η* +becomes negative, or when *i* reaches *0*. Combined, this is equivalent to adding a multiple of *f* to +*g* to cancel out multiple bottom bits, and then shifting them out. 
+ +It is easy to find what that multiple is: we want a number *w* such that *g+w f* has a few bottom +zero bits. If that number of bits is *L*, we want *g+w f mod 2L = 0*, or *w = -g/f mod 2L*. Since *f* +is odd, such a *w* exists for any *L*. *L* cannot be more than *i* steps (as we'd finish the loop before +doing more) or more than *η+1* steps (as we'd run `eta, f, g = -eta, g, f` at that point), but +apart from that, we're only limited by the complexity of computing *w*. + +This code demonstrates how to cancel up to 4 bits per step: + +```python +NEGINV16 = [15, 5, 3, 9, 7, 13, 11, 1] # NEGINV16[n//2] = (-n)^-1 mod 16, for odd n +i = N +while True: + zeros = min(i, count_trailing_zeros(g)) + eta -= zeros + g >>= zeros + i -= zeros + if i == 0: + break + # We know g is odd now + if eta < 0: + eta, f, g = -eta, g, f + # Compute limit on number of bits to cancel + limit = min(min(eta + 1, i), 4) + # Compute w = -g/f mod 2**limit, using the table value for -1/f mod 2**4. Note that f is + # always odd, so its inverse modulo a power of two always exists. + w = (g * NEGINV16[(f & 15) // 2]) % (2**limit) + # As w = -g/f mod (2**limit), g+w*f mod 2**limit = 0 mod 2**limit. + g += w * f + assert g % (2**limit) == 0 + # The next iteration will now shift out at least limit bottom zero bits from g. +``` + +By using a bigger table more bits can be cancelled at once. The table can also be implemented +as a formula. Several formulas are known for computing modular inverses modulo powers of two; +some can be found in Hacker's Delight second edition by Henry S. Warren, Jr. pages 245-247. +Here we need the negated modular inverse, which is a simple transformation of those: + +- Instead of a 3-bit table: + - *-f* or *f ^ 6* +- Instead of a 4-bit table: + - *1 - f(f + 1)* + - *-(f + (((f + 1) & 4) << 1))* +- For larger tables the following technique can be used: if *w=-1/f mod 2L*, then *w(w f+2)* is + *-1/f mod 22L*. This allows extending the previous formulas (or tables). 
In particular we + have this 6-bit function (based on the 3-bit function above): + - *f(f^2 - 2)* + +This loop, again extended to also handle *u*, *v*, *q*, and *r* alongside *f* and *g*, placed in +`divsteps_n_matrix`, gives a significantly faster, but non-constant time version. + + +## 7. Final Python version + +All together we need the following functions: + +- A way to compute the transition matrix in constant time, using the `divsteps_n_matrix` function + from section 2, but with its loop replaced by a variant of the constant-time divstep from + section 5, extended to handle *u*, *v*, *q*, *r*: + +```python +def divsteps_n_matrix(zeta, f, g): + """Compute zeta and transition matrix t after N divsteps (multiplied by 2^N).""" + u, v, q, r = 1, 0, 0, 1 # start with identity matrix + for _ in range(N): + c1 = zeta >> 63 + # Compute x, y, z as conditionally-negated versions of f, u, v. + x, y, z = (f ^ c1) - c1, (u ^ c1) - c1, (v ^ c1) - c1 + c2 = -(g & 1) + # Conditionally add x, y, z to g, q, r. + g, q, r = g + (x & c2), q + (y & c2), r + (z & c2) + c1 &= c2 # reusing c1 here for the earlier c3 variable + zeta = (zeta ^ c1) - 1 # inlining the unconditional zeta decrement here + # Conditionally add g, q, r to f, u, v. + f, u, v = f + (g & c1), u + (q & c1), v + (r & c1) + # When shifting g down, don't shift q, r, as we construct a transition matrix multiplied + # by 2^N. Instead, shift f's coefficients u and v up.
+ g, u, v = g >> 1, u << 1, v << 1 + return zeta, (u, v, q, r) +``` + +- The functions to update *f* and *g*, and *d* and *e*, from section 2 and section 4, with the constant-time + changes to `update_de` from section 5: + +```python +def update_fg(f, g, t): + """Multiply matrix t/2^N with [f, g].""" + u, v, q, r = t + cf, cg = u*f + v*g, q*f + r*g + return cf >> N, cg >> N + +def update_de(d, e, t, M, Mi): + """Multiply matrix t/2^N with [d, e], modulo M.""" + u, v, q, r = t + d_sign, e_sign = d >> 257, e >> 257 + md, me = (u & d_sign) + (v & e_sign), (q & d_sign) + (r & e_sign) + cd, ce = (u*d + v*e) % 2**N, (q*d + r*e) % 2**N + md -= (Mi*cd + md) % 2**N + me -= (Mi*ce + me) % 2**N + cd, ce = u*d + v*e + M*md, q*d + r*e + M*me + return cd >> N, ce >> N +``` + +- The `normalize` function from section 4, made constant time as well: + +```python +def normalize(sign, v, M): + """Compute sign*v mod M, where v in (-2*M,M); output in [0,M).""" + v_sign = v >> 257 + # Conditionally add M to v. + v += M & v_sign + c = (sign - 1) >> 1 + # Conditionally negate v. + v = (v ^ c) - c + v_sign = v >> 257 + # Conditionally add M to v again. 
+ v += M & v_sign + return v +``` + +- And finally the `modinv` function too, adapted to use *ζ* instead of *δ*, and using the fixed + iteration count from section 5: + +```python +def modinv(M, Mi, x): + """Compute the modular inverse of x mod M, given Mi=1/M mod 2^N.""" + zeta, f, g, d, e = -1, M, x, 0, 1 + for _ in range((590 + N - 1) // N): + zeta, t = divsteps_n_matrix(zeta, f % 2**N, g % 2**N) + f, g = update_fg(f, g, t) + d, e = update_de(d, e, t, M, Mi) + return normalize(f, d, M) +``` + +- To get a variable time version, replace the `divsteps_n_matrix` function with one that uses the + divsteps loop from section 6, and a `modinv` version that calls it without the fixed iteration + count: + +```python +NEGINV16 = [15, 5, 3, 9, 7, 13, 11, 1] # NEGINV16[n//2] = (-n)^-1 mod 16, for odd n +def divsteps_n_matrix_var(eta, f, g): + """Compute eta and transition matrix t after N divsteps (multiplied by 2^N).""" + u, v, q, r = 1, 0, 0, 1 + i = N + while True: + zeros = min(i, count_trailing_zeros(g)) + eta, i = eta - zeros, i - zeros + g, u, v = g >> zeros, u << zeros, v << zeros + if i == 0: + break + if eta < 0: + eta, f, u, v, g, q, r = -eta, g, q, r, -f, -u, -v + limit = min(min(eta + 1, i), 4) + w = (g * NEGINV16[(f & 15) // 2]) % (2**limit) + g, q, r = g + w*f, q + w*u, r + w*v + return eta, (u, v, q, r) + +def modinv_var(M, Mi, x): + """Compute the modular inverse of x mod M, given Mi = 1/M mod 2^N.""" + eta, f, g, d, e = -1, M, x, 0, 1 + while g != 0: + eta, t = divsteps_n_matrix_var(eta, f % 2**N, g % 2**N) + f, g = update_fg(f, g, t) + d, e = update_de(d, e, t, M, Mi) + return normalize(f, d, M) +``` diff --git a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1.h b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1.h index 60fe2deb..34ee5d11 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1.h +++ b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1.h @@ -7,7 +7,9 @@ extern "C" { #include <stddef.h> -/* These rules specify the order of
arguments in API calls: +/* Unless explicitly stated all pointer arguments must not be NULL. + * + * The following rules specify the order of arguments in API calls: * * 1. Context pointers go first, followed by output arguments, combined * output/input arguments, and finally input-only arguments. @@ -61,8 +63,9 @@ typedef struct rustsecp256k1zkp_v0_4_0_scratch_space_struct rustsecp256k1zkp_v0_ * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage, transmission, or - * comparison, use rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize and rustsecp256k1zkp_v0_4_0_ec_pubkey_parse. + * If you need to convert to a format suitable for storage or transmission, + * use rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize and rustsecp256k1zkp_v0_4_0_ec_pubkey_parse. To + * compare keys, use rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp. */ typedef struct { unsigned char data[64]; @@ -127,6 +130,17 @@ typedef int (*rustsecp256k1zkp_v0_4_0_nonce_function)( # define SECP256K1_INLINE inline # endif +/** When this header is used at build-time the SECP256K1_BUILD define needs to be set + * to correctly setup export attributes and nullness checks. This is normally done + * by secp256k1.c but to guard against this header being included before secp256k1.c + * has had a chance to set the define (e.g. via test harnesses that just includes + * secp256k1.c) we set SECP256K1_NO_BUILD when this header is processed without the + * BUILD define so this condition can be caught. 
+ */ +#ifndef SECP256K1_BUILD +# define SECP256K1_NO_BUILD +#endif + #ifndef SECP256K1_API # if defined(_WIN32) # ifdef SECP256K1_BUILD @@ -353,6 +367,21 @@ SECP256K1_API int rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize( unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +/** Compare two public keys using lexicographic (of compressed serialization) order + * + * Returns: <0 if the first public key is less than the second + * >0 if the first public key is greater than the second + * 0 if the two public keys are equal + * Args: ctx: a secp256k1 context object. + * In: pubkey1: first public key to compare + * pubkey2: second public key to compare + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp( + const rustsecp256k1zkp_v0_4_0_context* ctx, + const rustsecp256k1zkp_v0_4_0_pubkey* pubkey1, + const rustsecp256k1zkp_v0_4_0_pubkey* pubkey2 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + /** Parse an ECDSA signature in compact (64 bytes) format. * * Returns: 1 when the signature could be parsed, 0 otherwise. @@ -747,6 +776,31 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_ec_pubkey size_t n ) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +/** Compute a tagged hash as defined in BIP-340. + * + * This is useful for creating a message hash and achieving domain separation + * through an application-specific tag. This function returns + * SHA256(SHA256(tag)||SHA256(tag)||msg). Therefore, tagged hash + * implementations optimized for a specific tag can precompute the SHA256 state + * after hashing the tag hashes. + * + * Returns 0 if the arguments are invalid and 1 otherwise. 
+ * Args: ctx: pointer to a context object + * Out: hash32: pointer to a 32-byte array to store the resulting hash + * In: tag: pointer to an array containing the tag + * taglen: length of the tag array + * msg: pointer to an array containing the message + * msglen: length of the message array + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_tagged_sha256( + const rustsecp256k1zkp_v0_4_0_context* ctx, + unsigned char *hash32, + const unsigned char *tag, + size_t taglen, + const unsigned char *msg, + size_t msglen +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5); + #ifdef __cplusplus } #endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1.h.orig b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1.h.orig index a5083c41..5325dc94 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1.h.orig +++ b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1.h.orig @@ -7,7 +7,9 @@ extern "C" { #include -/* These rules specify the order of arguments in API calls: +/* Unless explicitly stated all pointer arguments must not be NULL. + * + * The following rules specify the order of arguments in API calls: * * 1. Context pointers go first, followed by output arguments, combined * output/input arguments, and finally input-only arguments. @@ -61,8 +63,9 @@ typedef struct rustsecp256k1zkp_v0_4_0_scratch_space_struct rustsecp256k1zkp_v0_ * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. - * If you need to convert to a format suitable for storage, transmission, or - * comparison, use rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize and rustsecp256k1zkp_v0_4_0_ec_pubkey_parse. 
+ * If you need to convert to a format suitable for storage or transmission, + * use rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize and rustsecp256k1zkp_v0_4_0_ec_pubkey_parse. To + * compare keys, use rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp. */ typedef struct { unsigned char data[64]; @@ -127,6 +130,17 @@ typedef int (*rustsecp256k1zkp_v0_4_0_nonce_function)( # define SECP256K1_INLINE inline # endif +/** When this header is used at build-time the SECP256K1_BUILD define needs to be set + * to correctly setup export attributes and nullness checks. This is normally done + * by secp256k1.c but to guard against this header being included before secp256k1.c + * has had a chance to set the define (e.g. via test harnesses that just includes + * secp256k1.c) we set SECP256K1_NO_BUILD when this header is processed without the + * BUILD define so this condition can be caught. + */ +#ifndef SECP256K1_BUILD +# define SECP256K1_NO_BUILD +#endif + #ifndef SECP256K1_API # if defined(_WIN32) # ifdef SECP256K1_BUILD @@ -370,6 +384,21 @@ SECP256K1_API int rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize( unsigned int flags ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +/** Compare two public keys using lexicographic (of compressed serialization) order + * + * Returns: <0 if the first public key is less than the second + * >0 if the first public key is greater than the second + * 0 if the two public keys are equal + * Args: ctx: a secp256k1 context object. + * In: pubkey1: first public key to compare + * pubkey2: second public key to compare + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp( + const rustsecp256k1zkp_v0_4_0_context* ctx, + const rustsecp256k1zkp_v0_4_0_pubkey* pubkey1, + const rustsecp256k1zkp_v0_4_0_pubkey* pubkey2 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + /** Parse an ECDSA signature in compact (64 bytes) format. 
* * Returns: 1 when the signature could be parsed, 0 otherwise. @@ -764,6 +793,31 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_ec_pubkey size_t n ) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +/** Compute a tagged hash as defined in BIP-340. + * + * This is useful for creating a message hash and achieving domain separation + * through an application-specific tag. This function returns + * SHA256(SHA256(tag)||SHA256(tag)||msg). Therefore, tagged hash + * implementations optimized for a specific tag can precompute the SHA256 state + * after hashing the tag hashes. + * + * Returns 0 if the arguments are invalid and 1 otherwise. + * Args: ctx: pointer to a context object + * Out: hash32: pointer to a 32-byte array to store the resulting hash + * In: tag: pointer to an array containing the tag + * taglen: length of the tag array + * msg: pointer to an array containing the message + * msglen: length of the message array + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_tagged_sha256( + const rustsecp256k1zkp_v0_4_0_context* ctx, + unsigned char *hash32, + const unsigned char *tag, + size_t taglen, + const unsigned char *msg, + size_t msglen +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5); + #ifdef __cplusplus } #endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_extrakeys.h b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_extrakeys.h index 2f7df493..49c51f8f 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_extrakeys.h +++ b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_extrakeys.h @@ -15,9 +15,9 @@ extern "C" { * The exact representation of data inside is implementation defined and not * guaranteed to be portable between different platforms or versions. It is * however guaranteed to be 64 bytes in size, and can be safely copied/moved. 
- * If you need to convert to a format suitable for storage, transmission, or - * comparison, use rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize and - * rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse. + * If you need to convert to a format suitable for storage or transmission, + * use rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize and rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse. To + * compare keys, use rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp. */ typedef struct { unsigned char data[64]; @@ -67,6 +67,21 @@ SECP256K1_API int rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize( const rustsecp256k1zkp_v0_4_0_xonly_pubkey* pubkey ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +/** Compare two x-only public keys using lexicographic order + * + * Returns: <0 if the first public key is less than the second + * >0 if the first public key is greater than the second + * 0 if the two public keys are equal + * Args: ctx: a secp256k1 context object. + * In: pk1: first public key to compare + * pk2: second public key to compare + */ +SECP256K1_API int rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp( + const rustsecp256k1zkp_v0_4_0_context* ctx, + const rustsecp256k1zkp_v0_4_0_xonly_pubkey* pk1, + const rustsecp256k1zkp_v0_4_0_xonly_pubkey* pk2 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + /** Converts a rustsecp256k1zkp_v0_4_0_pubkey into a rustsecp256k1zkp_v0_4_0_xonly_pubkey. * * Returns: 1 if the public key was successfully converted @@ -151,6 +166,20 @@ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_xonly_pub const unsigned char *tweak32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); +/** Sorts xonly public keys according to rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp + * + * Returns: 0 if the arguments are invalid. 1 otherwise.
+ * + * Args: ctx: pointer to a context object + * In: pubkeys: array of pointers to pubkeys to sort + * n_pubkeys: number of elements in the pubkeys array + */ +SECP256K1_API int rustsecp256k1zkp_v0_4_0_xonly_sort( + const rustsecp256k1zkp_v0_4_0_context* ctx, + const rustsecp256k1zkp_v0_4_0_xonly_pubkey **pubkeys, + size_t n_pubkeys +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2); + /** Compute the keypair for a secret key. * * Returns: 1: secret was valid, keypair is ready to use diff --git a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_musig.h b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_musig.h index f7a84b43..2043f5ef 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_musig.h +++ b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_musig.h @@ -9,342 +9,155 @@ extern "C" { #include -/** This module implements a Schnorr-based multi-signature scheme called MuSig - * (https://eprint.iacr.org/2018/068.pdf). It is compatible with bip-schnorr. +/** This module implements a Schnorr-based multi-signature scheme called MuSig2 + * (https://eprint.iacr.org/2020/1261). It is compatible with BIP-340 ("Schnorr"). * There's an example C source file in the module's directory * (src/modules/musig/example.c) that demonstrates how it can be used. * - * The documentation in this include file is for reference and may not be sufficient - * for users to begin using the library. A full description of API usage can be found - * in src/modules/musig/musig.md + * The module also supports BIP-341 ("Taproot") public key tweaking and adaptor + * signatures as described in + * https://github.com/ElementsProject/scriptless-scripts/pull/24 + * + * It is recommended to read the documention in this include file carefully. + * Further notes on API usage can be found in src/modules/musig/musig.md */ -/** Data structure containing auxiliary data generated in `pubkey_combine` and - * required for `session_*_init`. 
- * Fields: - * magic: Set during initialization in `pubkey_combine` to allow - * detecting an uninitialized object. - * pk_hash: The 32-byte hash of the original public keys - * pk_parity: Whether the MuSig-aggregated point was negated when - * converting it to the combined xonly pubkey. - * is_tweaked: Whether the combined pubkey was tweaked - * tweak: If is_tweaked, array with the 32-byte tweak - * internal_key_parity: If is_tweaked, the parity of the combined pubkey - * before tweaking +/** Opaque data structures + * + * The exact representation of data inside is implementation defined and not + * guaranteed to be portable between different platforms or versions. It can, + * however, be safely copied/moved. If you need to convert to a format suitable + * for storage, transmission, or comparison, use the corresponding + * serialization and parsing functions. */ -typedef struct { - uint64_t magic; - unsigned char pk_hash[32]; - int pk_parity; - int is_tweaked; - unsigned char tweak[32]; - int internal_key_parity; -} rustsecp256k1zkp_v0_4_0_musig_pre_session; -/** Data structure containing data related to a signing session resulting in a single - * signature. - * - * This structure is not opaque, but it MUST NOT be copied or read or written to it - * directly. A signer who is online throughout the whole process and can keep this - * structure in memory can use the provided API functions for a safe standard - * workflow. See https://blockstream.com/2019/02/18/musig-a-new-multisignature-standard/ - * for more details about the risks associated with serializing or deserializing this - * structure. - * - * Fields: - * magic: Set in `musig_session_init` to allow detecting an - * uninitialized object. 
- * round: Current round of the session - * pre_session: Auxiliary data created in `pubkey_combine` - * combined_pk: MuSig-computed combined xonly public key - * n_signers: Number of signers - * msg: The 32-byte message (hash) to be signed - * is_msg_set: Whether the above message has been set - * has_secret_data: Whether this session object has a signers' secret data; if this - * is `false`, it may still be used for verification purposes. - * seckey: If `has_secret_data`, the signer's secret key - * secnonce: If `has_secret_data`, the signer's secret nonce - * nonce: If `has_secret_data`, the signer's public nonce - * nonce_commitments_hash: If `has_secret_data` and round >= 1, the hash of all - * signers' commitments - * combined_nonce: If round >= 2, the summed combined public nonce - * combined_nonce_parity: If round >= 2, the parity of the Y coordinate of above - * nonce. +/** Opaque data structure that caches information about public key aggregation. + * + * Guaranteed to be 165 bytes in size. No serialization and parsing functions + * (yet). */ typedef struct { - uint64_t magic; - int round; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session; - rustsecp256k1zkp_v0_4_0_xonly_pubkey combined_pk; - uint32_t n_signers; - int is_msg_set; - unsigned char msg[32]; - int has_secret_data; - unsigned char seckey[32]; - unsigned char secnonce[32]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey nonce; - int partial_nonce_parity; - unsigned char nonce_commitments_hash[32]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey combined_nonce; - int combined_nonce_parity; -} rustsecp256k1zkp_v0_4_0_musig_session; + unsigned char data[165]; +} rustsecp256k1zkp_v0_4_0_musig_keyagg_cache; -/** Data structure containing data on all signers in a single session. - * - * The workflow for this structure is as follows: - * - * 1. This structure is initialized with `musig_session_init` or - * `musig_session_init_verifier`, which set the `index` field, and zero out - * all other fields. 
The public session is initialized with the signers' - * nonce_commitments. - * - * 2. In a non-public session the nonce_commitments are set with the function - * `musig_get_public_nonce`, which also returns the signer's public nonce. This - * ensures that the public nonce is not exposed until all commitments have been - * received. - * - * 3. Each individual data struct should be updated with `musig_set_nonce` once a - * nonce is available. This function takes a single signer data struct rather than - * an array because it may fail in the case that the provided nonce does not match - * the commitment. In this case, it is desirable to identify the exact party whose - * nonce was inconsistent. - * - * Fields: - * present: indicates whether the signer's nonce is set - * index: index of the signer in the MuSig key aggregation - * nonce: public nonce, must be a valid curvepoint if the signer is `present` - * nonce_commitment: commitment to the nonce, or all-bits zero if a commitment - * has not yet been set +/** Opaque data structure that holds a signer's _secret_ nonce. + * + * Guaranteed to be 68 bytes in size. + * + * WARNING: This structure MUST NOT be copied or read or written to directly. A + * signer who is online throughout the whole process and can keep this + * structure in memory can use the provided API functions for a safe standard + * workflow. See + * https://blockstream.com/2019/02/18/musig-a-new-multisignature-standard/ for + * more details about the risks associated with serializing or deserializing + * this structure. + * + * We repeat, copying this data structure can result in nonce reuse which will + * leak the secret signing key. */ typedef struct { - int present; - uint32_t index; - rustsecp256k1zkp_v0_4_0_xonly_pubkey nonce; - unsigned char nonce_commitment[32]; -} rustsecp256k1zkp_v0_4_0_musig_session_signer_data; + unsigned char data[68]; +} rustsecp256k1zkp_v0_4_0_musig_secnonce; + +/** Opaque data structure that holds a signer's public nonce. 
+* +* Guaranteed to be 132 bytes in size. Serialized and parsed with +* `musig_pubnonce_serialize` and `musig_pubnonce_parse`. +*/ +typedef struct { + unsigned char data[132]; +} rustsecp256k1zkp_v0_4_0_musig_pubnonce; -/** Opaque data structure that holds a MuSig partial signature. +/** Opaque data structure that holds an aggregate public nonce. * - * The exact representation of data inside is implementation defined and not - * guaranteed to be portable between different platforms or versions. It is however - * guaranteed to be 32 bytes in size, and can be safely copied/moved. If you need - * to convert to a format suitable for storage, transmission, or comparison, use the - * `musig_partial_signature_serialize` and `musig_partial_signature_parse` - * functions. + * Guaranteed to be 132 bytes in size. Serialized and parsed with + * `musig_aggnonce_serialize` and `musig_aggnonce_parse`. */ typedef struct { - unsigned char data[32]; -} rustsecp256k1zkp_v0_4_0_musig_partial_signature; + unsigned char data[132]; +} rustsecp256k1zkp_v0_4_0_musig_aggnonce; -/** Computes a combined public key and the hash of the given public keys. - * Different orders of `pubkeys` result in different `combined_pk`s. +/** Opaque data structure that holds a cache for a MuSig session. * - * Returns: 1 if the public keys were successfully combined, 0 otherwise - * Args: ctx: pointer to a context object initialized for verification - * (cannot be NULL) - * scratch: scratch space used to compute the combined pubkey by - * multiexponentiation. If NULL, an inefficient algorithm is used. - * Out: combined_pk: the MuSig-combined xonly public key (cannot be NULL) - * pre_session: if non-NULL, pointer to a musig_pre_session struct to be used in - * `musig_session_init` or `musig_pubkey_tweak_add`. - * In: pubkeys: input array of public keys to combine. 
The order is important; - * a different order will result in a different combined public - * key (cannot be NULL) - * n_pubkeys: length of pubkeys array. Must be greater than 0. + * This structure is not necessarily required to be kept secret. Guaranteed to + * be 133 bytes in size. No serialization and parsing functions (yet). */ -SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_pubkey_combine( - const rustsecp256k1zkp_v0_4_0_context* ctx, - rustsecp256k1zkp_v0_4_0_scratch_space *scratch, - rustsecp256k1zkp_v0_4_0_xonly_pubkey *combined_pk, - rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, - const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkeys, - size_t n_pubkeys -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5); +typedef struct { + unsigned char data[133]; +} rustsecp256k1zkp_v0_4_0_musig_session; -/** Tweak an x-only public key by adding the generator multiplied with tweak32 - * to it. The resulting output_pubkey with the given internal_pubkey and tweak - * passes `rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_test`. - * - * This function is only useful before initializing a signing session. If you - * are only computing a public key, but not intending to create a signature for - * it, you can just use `rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_add`. Can only be called - * once with a given pre_session. +/** Opaque data structure that holds a partial MuSig signature. * - * Returns: 0 if the arguments are invalid or the resulting public key would be - * invalid (only when the tweak is the negation of the corresponding - * secret key). 1 otherwise. - * Args: ctx: pointer to a context object initialized for verification - * (cannot be NULL) - * pre_session: pointer to a `musig_pre_session` struct initialized in - * `musig_pubkey_combine` (cannot be NULL) - * Out: output_pubkey: pointer to a public key to store the result. 
Will be set - * to an invalid value if this function returns 0 (cannot - * be NULL) - * In: internal_pubkey: pointer to the `combined_pk` from - * `musig_pubkey_combine` to which the tweak is applied. - * (cannot be NULL). - * tweak32: pointer to a 32-byte tweak. If the tweak is invalid - * according to rustsecp256k1zkp_v0_4_0_ec_seckey_verify, this function - * returns 0. For uniformly random 32-byte arrays the - * chance of being invalid is negligible (around 1 in - * 2^128) (cannot be NULL). + * Guaranteed to be 36 bytes in size. Serialized and parsed with + * `musig_partial_sig_serialize` and `musig_partial_sig_parse`. */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add( - const rustsecp256k1zkp_v0_4_0_context* ctx, - rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, - rustsecp256k1zkp_v0_4_0_pubkey *output_pubkey, - const rustsecp256k1zkp_v0_4_0_xonly_pubkey *internal_pubkey, - const unsigned char *tweak32 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); +typedef struct { + unsigned char data[36]; +} rustsecp256k1zkp_v0_4_0_musig_partial_sig; -/** Initializes a signing session for a signer - * - * Returns: 1: session is successfully initialized - * 0: session could not be initialized: secret key or secret nonce overflow - * Args: ctx: pointer to a context object, initialized for signing (cannot - * be NULL) - * Out: session: the session structure to initialize (cannot be NULL) - * signers: an array of signers' data to be initialized. Array length must - * equal to `n_signers` (cannot be NULL) - * nonce_commitment32: filled with a 32-byte commitment to the generated nonce - * (cannot be NULL) - * In: session_id32: a *unique* 32-byte ID to assign to this session (cannot be - * NULL). If a non-unique session_id32 was given then a partial - * signature will LEAK THE SECRET KEY. - * msg32: the 32-byte message to be signed. 
Shouldn't be NULL unless you - * require sharing nonce commitments before the message is known - * because it reduces nonce misuse resistance. If NULL, must be - * set with `musig_session_get_public_nonce`. - * combined_pk: the combined xonly public key of all signers (cannot be NULL) - * pre_session: pointer to a musig_pre_session struct after initializing - * it with `musig_pubkey_combine` and optionally provided to - * `musig_pubkey_tweak_add` (cannot be NULL). - * n_signers: length of signers array. Number of signers participating in - * the MuSig. Must be greater than 0 and at most 2^32 - 1. - * my_index: index of this signer in the signers array. Must be less - * than `n_signers`. - * seckey: the signer's 32-byte secret key (cannot be NULL) +/** Parse a signers public nonce. + * + * Returns: 1 when the nonce could be parsed, 0 otherwise. + * Args: ctx: a secp256k1 context object + * Out: nonce: pointer to a nonce object + * In: in66: pointer to the 66-byte nonce to be parsed */ -SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_session_init( +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_pubnonce_parse( const rustsecp256k1zkp_v0_4_0_context* ctx, - rustsecp256k1zkp_v0_4_0_musig_session *session, - rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers, - unsigned char *nonce_commitment32, - const unsigned char *session_id32, - const unsigned char *msg32, - const rustsecp256k1zkp_v0_4_0_xonly_pubkey *combined_pk, - const rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, - size_t n_signers, - size_t my_index, - const unsigned char *seckey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(7) SECP256K1_ARG_NONNULL(8) SECP256K1_ARG_NONNULL(11); + rustsecp256k1zkp_v0_4_0_musig_pubnonce* nonce, + const unsigned char *in66 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Gets the signer's public nonce given a list of all 
signers' data with - * commitments. Called by participating signers after - * `rustsecp256k1zkp_v0_4_0_musig_session_init` and after all nonce commitments have - * been collected - * - * Returns: 1: public nonce is written in nonce - * 0: signer data is missing commitments or session isn't initialized - * for signing - * Args: ctx: pointer to a context object (cannot be NULL) - * session: the signing session to get the nonce from (cannot be NULL) - * signers: an array of signers' data initialized with - * `musig_session_init`. Array length must equal to - * `n_commitments` (cannot be NULL) - * Out: nonce32: filled with a 32-byte public nonce which is supposed to be - * sent to the other signers and then used in `musig_set nonce` - * (cannot be NULL) - * In: commitments: array of pointers to 32-byte nonce commitments (cannot be NULL) - * n_commitments: the length of commitments and signers array. Must be the total - * number of signers participating in the MuSig. - * msg32: the 32-byte message to be signed. Must be NULL if already - * set with `musig_session_init` otherwise can not be NULL. 
+/** Serialize a signer's public nonce + * + * Returns: 1 when the nonce could be serialized, 0 otherwise + * Args: ctx: a secp256k1 context object + * Out: out32: pointer to a 66-byte array to store the serialized nonce + * In: nonce: pointer to the nonce */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce( +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_pubnonce_serialize( const rustsecp256k1zkp_v0_4_0_context* ctx, - rustsecp256k1zkp_v0_4_0_musig_session *session, - rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers, - unsigned char *nonce32, - const unsigned char *const *commitments, - size_t n_commitments, - const unsigned char *msg32 -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); + unsigned char *out66, + const rustsecp256k1zkp_v0_4_0_musig_pubnonce* nonce +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Initializes a verifier session that can be used for verifying nonce commitments - * and partial signatures. It does not have secret key material and therefore can not - * be used to create signatures. - * - * Returns: 1 when session is successfully initialized, 0 otherwise - * Args: ctx: pointer to a context object (cannot be NULL) - * Out: session: the session structure to initialize (cannot be NULL) - * signers: an array of signers' data to be initialized. Array length must - * equal to `n_signers`(cannot be NULL) - * In: msg32: the 32-byte message to be signed (cannot be NULL) - * combined_pk: the combined xonly public key of all signers (cannot be NULL) - * pre_session: pointer to a musig_pre_session struct from - * `musig_pubkey_combine` (cannot be NULL) - * pk_hash32: the 32-byte hash of the signers' individual keys (cannot be NULL) - * commitments: array of pointers to 32-byte nonce commitments. 
Array - * length must equal to `n_signers` (cannot be NULL) - * n_signers: length of signers and commitments array. Number of signers - * participating in the MuSig. Must be greater than 0 and at most - * 2^32 - 1. +/** Parse an aggregate public nonce. + * + * Returns: 1 when the nonce could be parsed, 0 otherwise. + * Args: ctx: a secp256k1 context object + * Out: nonce: pointer to a nonce object + * In: in66: pointer to the 66-byte nonce to be parsed */ -SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_session_init_verifier( +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse( const rustsecp256k1zkp_v0_4_0_context* ctx, - rustsecp256k1zkp_v0_4_0_musig_session *session, - rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers, - const unsigned char *msg32, - const rustsecp256k1zkp_v0_4_0_xonly_pubkey *combined_pk, - const rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, - const unsigned char *const *commitments, - size_t n_signers -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6) SECP256K1_ARG_NONNULL(7); + rustsecp256k1zkp_v0_4_0_musig_aggnonce* nonce, + const unsigned char *in66 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Checks a signer's public nonce against a commitment to said nonce, and update - * data structure if they match - * - * Returns: 1: commitment was valid, data structure updated - * 0: commitment was invalid, nothing happened - * Args: ctx: pointer to a context object (cannot be NULL) - * signer: pointer to the signer data to update (cannot be NULL). Must have - * been used with `musig_session_get_public_nonce` or initialized - * with `musig_session_init_verifier`. 
- * In: nonce32: signer's alleged public nonce (cannot be NULL) +/** Serialize an aggregate public nonce + * + * Returns: 1 when the nonce could be serialized, 0 otherwise + * Args: ctx: a secp256k1 context object + * Out: out32: pointer to a 66-byte array to store the serialized nonce + * In: nonce: pointer to the nonce */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_musig_set_nonce( +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_aggnonce_serialize( const rustsecp256k1zkp_v0_4_0_context* ctx, - rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signer, - const unsigned char *nonce32 + unsigned char *out66, + const rustsecp256k1zkp_v0_4_0_musig_aggnonce* nonce ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Updates a session with the combined public nonce of all signers. The combined - * public nonce is the sum of every signer's public nonce. - * - * Returns: 1: nonces are successfully combined - * 0: a signer's nonce is missing - * Args: ctx: pointer to a context object (cannot be NULL) - * session: session to update with the combined public nonce (cannot be - * NULL) - * signers: an array of signers' data, which must have had public nonces - * set with `musig_set_nonce`. Array length must equal to `n_signers` - * (cannot be NULL) - * n_signers: the length of the signers array. Must be the total number of - * signers participating in the MuSig. - * Out: nonce_parity: if non-NULL, a pointer to an integer that indicates the - * parity of the combined public nonce. Used for adaptor - * signatures. - * adaptor: point to add to the combined public nonce. If NULL, nothing is - * added to the combined nonce. +/** Parse an aggregate public nonce. + * + * Returns: 1 when the nonce could be parsed, 0 otherwise. 
+ * Args: ctx: a secp256k1 context object + * Out: nonce: pointer to a nonce object + * In: in66: pointer to the 66-byte nonce to be parsed */ -SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces( +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse( const rustsecp256k1zkp_v0_4_0_context* ctx, - rustsecp256k1zkp_v0_4_0_musig_session *session, - const rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers, - size_t n_signers, - int *nonce_parity, - const rustsecp256k1zkp_v0_4_0_pubkey *adaptor + rustsecp256k1zkp_v0_4_0_musig_aggnonce* nonce, + const unsigned char *in66 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); /** Serialize a MuSig partial signature or adaptor signature @@ -354,13 +167,13 @@ SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces( * Out: out32: pointer to a 32-byte array to store the serialized signature * In: sig: pointer to the signature */ -SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_partial_signature_serialize( +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_partial_sig_serialize( const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *out32, - const rustsecp256k1zkp_v0_4_0_musig_partial_signature* sig + const rustsecp256k1zkp_v0_4_0_musig_partial_sig* sig ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Parse and verify a MuSig partial signature. +/** Parse a MuSig partial signature. * * Returns: 1 when the signature could be parsed, 0 otherwise. * Args: ctx: a secp256k1 context object @@ -371,26 +184,207 @@ SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_partial_signature_serialize( * encoded numbers are out of range, signature verification with it is * guaranteed to fail for every message and public key. 
*/ -SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_partial_signature_parse( +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_partial_sig_parse( const rustsecp256k1zkp_v0_4_0_context* ctx, - rustsecp256k1zkp_v0_4_0_musig_partial_signature* sig, + rustsecp256k1zkp_v0_4_0_musig_partial_sig* sig, const unsigned char *in32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); +/** Computes a aggregate public key and the hash of the given public keys. + * + * Different orders of `pubkeys` result in different `agg_pk`s. + * + * The pubkeys can be sorted before combining with `rustsecp256k1zkp_v0_4_0_xonly_sort` which + * ensures the same resulting `agg_pk` for the same multiset of pubkeys. + * This is useful to do before pubkey_agg, such that the order of pubkeys + * does not affect the aggregate public key. + * + * Returns: 1 if the public keys were successfully aggregated, 0 otherwise + * Args: ctx: pointer to a context object initialized for verification + * scratch: scratch space used to compute the aggregate pubkey by + * multiexponentiation. If NULL, an inefficient algorithm is used. + * Out: agg_pk: the MuSig-aggregated xonly public key. If you do not need it, + * this arg can be NULL. + * keyagg_cache: if non-NULL, pointer to a musig_keyagg_cache struct that + * is required for signing (or verifying the MuSig protocol). + * In: pubkeys: input array of pointers to public keys to aggregate. The order + * is important; a different order will result in a different + * aggregate public key + * n_pubkeys: length of pubkeys array. Must be greater than 0. 
+ */ +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_pubkey_agg( + const rustsecp256k1zkp_v0_4_0_context* ctx, + rustsecp256k1zkp_v0_4_0_scratch_space *scratch, + rustsecp256k1zkp_v0_4_0_xonly_pubkey *agg_pk, + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, + const rustsecp256k1zkp_v0_4_0_xonly_pubkey * const* pubkeys, + size_t n_pubkeys +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(5); + +/** Tweak an x-only public key by adding the generator multiplied with tweak32 + * to it. The resulting output_pubkey with the original agg_pk output of + * musig_pubkey_agg and tweak passes `rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_test`. + * + * This function is only useful before initializing a signing session. If you + * are only computing a public key, but not intending to create a signature for + * it, you can just use `rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_add`. Can only be called + * once with a given keyagg_cache. + * + * Returns: 0 if the arguments are invalid or the resulting public key would be + * invalid (only when the tweak is the negation of the corresponding + * secret key) or if the key has already been tweaked. 1 otherwise. + * Args: ctx: pointer to a context object initialized for verification + * Out: output_pubkey: pointer to a public key to store the result. Will be set + * to an invalid value if this function returns 0. If you + * do not need it, this arg can be NULL. + * tweak32: pointer to a 32-byte tweak. If the tweak is invalid + * according to rustsecp256k1zkp_v0_4_0_ec_seckey_verify, this function + * returns 0. For uniformly random 32-byte arrays the + * chance of being invalid is negligible (around 1 in + * 2^128). 
+ * In/Out: keyagg_cache: pointer to a `musig_keyagg_cache` struct initialized in + * `musig_pubkey_agg` + */ +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add( + const rustsecp256k1zkp_v0_4_0_context* ctx, + rustsecp256k1zkp_v0_4_0_pubkey *output_pubkey, + const unsigned char *tweak32, + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); + +/** Starts a signing session by generating a nonce + * + * This function outputs a secret nonce that will be required for signing and a + * corresponding public nonce that is intended to be sent to other signers. + * + * MuSig differs from regular Schnorr signing in that implementers _must_ take + * special care to not reuse a nonce. This can be ensured by following these rules: + * + * 1. Always provide a unique session_id32. It is a "number used once". + * 2. If you already know the signing key, message or aggregate public key + * cache, they can be optionally provided to derive the nonce and increase + * misuse-resistance. The extra_input32 argument can be used to provide + * additional data that does not repeat in normal scenarios, such as the + * current time. + * 3. If you do not provide a seckey, session_id32 _must_ be UNIFORMLY RANDOM. + * If you do provide a seckey, session_id32 can instead be a counter (that + * must never repeat!). However, it is recommended to always choose + * session_id32 uniformly at random. Note that using the same seckey for + * multiple MuSig sessions is fine. + * 4. Avoid copying (or serializing) the secnonce. This reduces the possibility + * that it is used more than once for signing. + * + * Remember that nonce reuse will immediately leak the secret key! 
+ * + * Returns: 0 if the arguments are invalid and 1 otherwise + * Args: ctx: pointer to a context object, initialized for signing + * Out: secnonce: pointer to a structure to store the secret nonce + * pubnonce: pointer to a structure to store the public nonce + * In: session_id32: a 32-byte session_id32 as explained above. Must be + * uniformly random unless you really know what you are + * doing. + * seckey: the 32-byte secret key that will be used for signing if + * already known (can be NULL) + * msg32: the 32-byte message that will be signed if already known + * (can be NULL) + * keyagg_cache: pointer to the keyagg_cache that was used to create the aggregate + * (and tweaked) public key if already known (can be NULL) + * extra_input32: an optional 32-byte array that is input to the nonce + * derivation function (can be NULL) + */ +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_nonce_gen( + const rustsecp256k1zkp_v0_4_0_context* ctx, + rustsecp256k1zkp_v0_4_0_musig_secnonce *secnonce, + rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce, + const unsigned char *session_id32, + const unsigned char *seckey, + const unsigned char *msg32, + const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, + const unsigned char *extra_input32 +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(4); + +/** Aggregates the nonces of every signer into a single nonce + * + * This can be done by an untrusted third party to reduce the communication + * between signers. Instead of everyone sending nonces to everyone else, there + * can be one party receiving all nonces, aggregating the nonces with this + * function and then sending only the aggregate nonce back to the signers. 
+ * + * Returns: 0 if the arguments are invalid or if all signers sent invalid + * pubnonces, 1 otherwise + * Args: ctx: pointer to a context object + * Out: aggnonce: pointer to an the aggregate public nonce object for + * musig_nonce_process + * In: pubnonces: array of pointers to public nonces sent by the + * signers + * n_pubnonces: number of elements in the pubnonces array. Must be + * greater than 0. + */ +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_nonce_agg( + const rustsecp256k1zkp_v0_4_0_context* ctx, + rustsecp256k1zkp_v0_4_0_musig_aggnonce *aggnonce, + const rustsecp256k1zkp_v0_4_0_musig_pubnonce * const* pubnonces, + size_t n_pubnonces +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + +/** Takes the public nonces of all signers and computes a session cache that is + * required for signing and verification of partial signatures and a signature + * template that is required for combining partial signatures. + * + * If the adaptor argument is non-NULL then the output of musig_partial_sig_agg + * will be an invalid Schnorr signature, until the signature is given to + * musig_adapt with the corresponding secret adaptor. 
+ * + * Returns: 0 if the arguments are invalid or if all signers sent invalid + * pubnonces, 1 otherwise + * Args: ctx: pointer to a context object, initialized for verification + * Out: session: pointer to a struct to store the session + * In: aggnonce: pointer to an the aggregate public nonce object that is + * output of musig_nonce_agg + * msg32: the 32-byte message to sign + * keyagg_cache: pointer to the keyagg_cache that was used to create the + * aggregate (and tweaked) pubkey + * adaptor: optional pointer to an adaptor point encoded as a public + * key if this signing session is part of an adaptor + * signature protocol + */ +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_nonce_process( + const rustsecp256k1zkp_v0_4_0_context* ctx, + rustsecp256k1zkp_v0_4_0_musig_session *session, + const rustsecp256k1zkp_v0_4_0_musig_aggnonce *aggnonce, + const unsigned char *msg32, + const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, + const rustsecp256k1zkp_v0_4_0_pubkey *adaptor +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); + /** Produces a partial signature * - * Returns: 1: partial signature constructed - * 0: session in incorrect or inconsistent state - * Args: ctx: pointer to a context object (cannot be NULL) - * session: active signing session for which the combined nonce has been - * computed (cannot be NULL) - * Out: partial_sig: partial signature (cannot be NULL) + * This function sets the given secnonce to 0 and will abort if given a + * secnonce that is 0. This is a best effort attempt to protect against nonce + * reuse. However, this is of course easily defeated if the secnonce has been + * copied (or serialized). Remember that nonce reuse will immediately leak the + * secret key! 
+ * + * Returns: 0 if the arguments are invalid or the provided secnonce has already + * been used for signing, 1 otherwise + * Args: ctx: pointer to a context object + * Out: partial_sig: pointer to struct to store the partial signature + * In/Out: secnonce: pointer to the secnonce struct created in + * musig_nonce_gen + * In: keypair: pointer to keypair to sign the message with + * keyagg_cache: pointer to the keyagg_cache that was output when the + * aggregate public key for this session + * session: pointer to the session that was created with + * musig_nonce_process */ SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_partial_sign( const rustsecp256k1zkp_v0_4_0_context* ctx, - const rustsecp256k1zkp_v0_4_0_musig_session *session, - rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sig -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sig, + rustsecp256k1zkp_v0_4_0_musig_secnonce *secnonce, + const rustsecp256k1zkp_v0_4_0_keypair *keypair, + const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, + const rustsecp256k1zkp_v0_4_0_musig_session *session +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6); /** Checks that an individual partial signature verifies * @@ -400,84 +394,112 @@ SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_partial_sign( * problem will be caught. But this function allows determining the specific party * who produced an invalid signature, so that signing can be restarted without them. 
* - * Returns: 1: partial signature verifies - * 0: invalid signature or bad data - * Args: ctx: pointer to a context object (cannot be NULL) - * session: active session for which the combined nonce has been computed - * (cannot be NULL) - * signer: data for the signer who produced this signature (cannot be NULL) - * In: partial_sig: signature to verify (cannot be NULL) - * pubkey: public key of the signer who produced the signature (cannot be NULL) + * Returns: 0 if the arguments are invalid or the partial signature does not + * verify + * Args ctx: pointer to a context object, initialized for verification + * In: partial_sig: pointer to partial signature to verify + * pubnonce: public nonce sent by the signer who produced the + * signature + * pubkey: public key of the signer who produced the signature + * keyagg_cache: pointer to the keyagg_cache that was output when the + * aggregate public key for this session + * session: pointer to the session that was created with + * musig_nonce_process */ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify( const rustsecp256k1zkp_v0_4_0_context* ctx, - const rustsecp256k1zkp_v0_4_0_musig_session *session, - const rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signer, - const rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sig, - const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5); + const rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sig, + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce, + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkey, + const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, + const rustsecp256k1zkp_v0_4_0_musig_session *session +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4) SECP256K1_ARG_NONNULL(5) SECP256K1_ARG_NONNULL(6); -/** Combines 
partial signatures - * - * Returns: 1: all partial signatures have values in range. Does NOT mean the - * resulting signature verifies. - * 0: some partial signature are missing or had s or r out of range - * Args: ctx: pointer to a context object (cannot be NULL) - * session: initialized session for which the combined nonce has been - * computed (cannot be NULL) - * Out: sig64: complete signature (cannot be NULL) - * In: partial_sigs: array of partial signatures to combine (cannot be NULL) - * n_sigs: number of signatures in the partial_sigs array +/** Aggregates partial signatures + * + * Returns: 0 if the arguments are invalid or a partial_sig is out of range, 1 + * otherwise (which does NOT mean the resulting signature verifies). + * Args: ctx: pointer to a context object + * Out: sig64: complete Schnorr signature + * In: session: pointer to the session that was created with + * musig_nonce_process + * partial_sigs: array of pointers to partial signatures to aggregate + * n_sigs: number of elements in the partial_sigs array */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine( +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg( const rustsecp256k1zkp_v0_4_0_context* ctx, - const rustsecp256k1zkp_v0_4_0_musig_session *session, unsigned char *sig64, - const rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sigs, + const rustsecp256k1zkp_v0_4_0_musig_session *session, + const rustsecp256k1zkp_v0_4_0_musig_partial_sig * const* partial_sigs, size_t n_sigs ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); -/** Converts a partial signature to an adaptor signature by adding a given secret - * adaptor. +/** Extracts the nonce_parity bit from a session + * + * This is used for adaptor signatures. 
* - * Returns: 1: signature and secret adaptor contained valid values + * Returns: 0 if one of the arguments was NULL, and 1 otherwise. + * Args: ctx: pointer to a context object + * Out: nonce_parity: pointer to an integer that indicates the parity + * of the aggregate public nonce. Used for adaptor + * signatures. + * In: session: pointer to the session that was created with + * musig_nonce_process + */ +int rustsecp256k1zkp_v0_4_0_musig_nonce_parity( + const rustsecp256k1zkp_v0_4_0_context* ctx, + int *nonce_parity, + rustsecp256k1zkp_v0_4_0_musig_session *session +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); + +/** Converts a pre-signature that misses the adaptor into a full signature + * + * If the sec_adaptor32 argument is incorrect, the adapted signature will be + * invalid. This function does not verify the adapted signature. + * + * Returns: 1: signature and secret adaptor contained valid values (which does + * NOT mean the signature or the adaptor are valid!) 
* 0: otherwise - * Args: ctx: pointer to a context object (cannot be NULL) - * Out: adaptor_sig: adaptor signature to produce (cannot be NULL) - * In: partial_sig: partial signature to tweak with secret adaptor (cannot be NULL) - * sec_adaptor32: 32-byte secret adaptor to add to the partial signature (cannot - * be NULL) - * nonce_parity: the `nonce_parity` output of `musig_session_combine_nonces` + * Args: ctx: pointer to a context object + * In/Out: sig64: 64-byte pre-signature that is adapted to a full signature + * In: sec_adaptor32: 32-byte secret adaptor to add to the partial signature + * nonce_parity: the output of `musig_nonce_parity` called with the + * session used for producing sig64 */ -SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt( +SECP256K1_API int rustsecp256k1zkp_v0_4_0_musig_adapt( const rustsecp256k1zkp_v0_4_0_context* ctx, - rustsecp256k1zkp_v0_4_0_musig_partial_signature *adaptor_sig, - const rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sig, + unsigned char *sig64, const unsigned char *sec_adaptor32, int nonce_parity -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3); -/** Extracts a secret adaptor from a MuSig, given all parties' partial - * signatures. This function will not fail unless given grossly invalid data; if it - * is merely given signatures that do not verify, the returned value will be - * nonsense. It is therefore important that all data be verified at earlier steps of - * any protocol that uses this function. +/** Extracts a secret adaptor from a MuSig pre-signature and corresponding + * signature + * + * This function will not fail unless given grossly invalid data; if it is + * merely given signatures that do not verify, the returned value will be + * nonsense. 
It is therefore important that all data be verified at earlier + * steps of any protocol that uses this function. In particular, this includes + * verifying all partial signatures that were aggregated into pre_sig64. * - * Returns: 1: signatures contained valid data such that an adaptor could be extracted + * Returns: 1: signatures contained valid data such that an adaptor could be + * extracted (which does NOT mean the signatures or the adaptor are + * valid!) * 0: otherwise - * Args: ctx: pointer to a context object (cannot be NULL) - * Out:sec_adaptor32: 32-byte secret adaptor (cannot be NULL) - * In: sig64: complete 2-of-2 signature (cannot be NULL) - * partial_sigs: array of partial signatures (cannot be NULL) - * n_partial_sigs: number of elements in partial_sigs array - * nonce_parity: the `nonce_parity` output of `musig_session_combine_nonces` + * Args: ctx: pointer to a context object + * Out:sec_adaptor32: 32-byte secret adaptor + * In: sig64: complete, valid 64-byte signature + * pre_sig64: the pre-signature corresponding to sig64, i.e., the + * aggregate of partial signatures without the secret + * adaptor + * nonce_parity: the output of `musig_nonce_parity` called with the + * session used for producing sig64 */ -SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor( +SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_musig_extract_adaptor( const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *sec_adaptor32, const unsigned char *sig64, - const rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sigs, - size_t n_partial_sigs, + const unsigned char *pre_sig64, int nonce_parity ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); diff --git a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_schnorrsig.h b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_schnorrsig.h index 6f19c7eb..e11d92ed 100644 --- 
a/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_schnorrsig.h +++ b/secp256k1-zkp-sys/depend/secp256k1/include/secp256k1_schnorrsig.h @@ -23,24 +23,29 @@ extern "C" { * * Returns: 1 if a nonce was successfully generated. 0 will cause signing to * return an error. - * Out: nonce32: pointer to a 32-byte array to be filled by the function. - * In: msg32: the 32-byte message hash being verified (will not be NULL) - * key32: pointer to a 32-byte secret key (will not be NULL) - * xonly_pk32: the 32-byte serialized xonly pubkey corresponding to key32 - * (will not be NULL) - * algo16: pointer to a 16-byte array describing the signature - * algorithm (will not be NULL). - * data: Arbitrary data pointer that is passed through. + * Out: nonce32: pointer to a 32-byte array to be filled by the function + * In: msg: the message being verified. Is NULL if and only if msglen + * is 0. + * msglen: the length of the message + * key32: pointer to a 32-byte secret key (will not be NULL) + * xonly_pk32: the 32-byte serialized xonly pubkey corresponding to key32 + * (will not be NULL) + * algo: pointer to an array describing the signature + * algorithm (will not be NULL) + * algolen: the length of the algo array + * data: arbitrary data pointer that is passed through * * Except for test cases, this function should compute some cryptographic hash of * the message, the key, the pubkey, the algorithm description, and data. */ typedef int (*rustsecp256k1zkp_v0_4_0_nonce_function_hardened)( unsigned char *nonce32, - const unsigned char *msg32, + const unsigned char *msg, + size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, - const unsigned char *algo16, + const unsigned char *algo, + size_t algolen, void *data ); @@ -50,59 +55,113 @@ typedef int (*rustsecp256k1zkp_v0_4_0_nonce_function_hardened)( * * If a data pointer is passed, it is assumed to be a pointer to 32 bytes of * auxiliary random data as defined in BIP-340. 
If the data pointer is NULL, - * schnorrsig_sign does not produce BIP-340 compliant signatures. The algo16 - * argument must be non-NULL, otherwise the function will fail and return 0. - * The hash will be tagged with algo16 after removing all terminating null - * bytes. Therefore, to create BIP-340 compliant signatures, algo16 must be set - * to "BIP0340/nonce\0\0\0" + * the nonce derivation procedure follows BIP-340 by setting the auxiliary + * random data to zero. The algo argument must be non-NULL, otherwise the + * function will fail and return 0. The hash will be tagged with algo. + * Therefore, to create BIP-340 compliant signatures, algo must be set to + * "BIP0340/nonce" and algolen to 13. */ SECP256K1_API extern const rustsecp256k1zkp_v0_4_0_nonce_function_hardened rustsecp256k1zkp_v0_4_0_nonce_function_bip340; +/** Data structure that contains additional arguments for schnorrsig_sign_custom. + * + * A schnorrsig_extraparams structure object can be initialized correctly by + * setting it to SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT. + * + * Members: + * magic: set to SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC at initialization + * and has no other function than making sure the object is + * initialized. + * noncefp: pointer to a nonce generation function. If NULL, + * rustsecp256k1zkp_v0_4_0_nonce_function_bip340 is used + * ndata: pointer to arbitrary data used by the nonce generation function + * (can be NULL). If it is non-NULL and + * rustsecp256k1zkp_v0_4_0_nonce_function_bip340 is used, then ndata must be a + * pointer to 32-byte auxiliary randomness as per BIP-340. + */ +typedef struct { + unsigned char magic[4]; + rustsecp256k1zkp_v0_4_0_nonce_function_hardened noncefp; + void* ndata; +} rustsecp256k1zkp_v0_4_0_schnorrsig_extraparams; + +#define SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC { 0xda, 0x6f, 0xb3, 0x8c } +#define SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT {\ + SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC,\ + NULL,\ + NULL\ +} + /** Create a Schnorr signature. 
* * Does _not_ strictly follow BIP-340 because it does not verify the resulting * signature. Instead, you can manually use rustsecp256k1zkp_v0_4_0_schnorrsig_verify and * abort if it fails. * - * Otherwise BIP-340 compliant if the noncefp argument is NULL or - * rustsecp256k1zkp_v0_4_0_nonce_function_bip340 and the ndata argument is 32-byte auxiliary - * randomness. + * This function only signs 32-byte messages. If you have messages of a + * different size (or the same size but without a context-specific tag + * prefix), it is recommended to create a 32-byte message hash with + * rustsecp256k1zkp_v0_4_0_tagged_sha256 and then sign the hash. Tagged hashing allows + * providing an context-specific tag for domain separation. This prevents + * signatures from being valid in multiple contexts by accident. * * Returns 1 on success, 0 on failure. * Args: ctx: pointer to a context object, initialized for signing (cannot be NULL) * Out: sig64: pointer to a 64-byte array to store the serialized signature (cannot be NULL) * In: msg32: the 32-byte message being signed (cannot be NULL) * keypair: pointer to an initialized keypair (cannot be NULL) - * noncefp: pointer to a nonce generation function. If NULL, rustsecp256k1zkp_v0_4_0_nonce_function_bip340 is used - * ndata: pointer to arbitrary data used by the nonce generation - * function (can be NULL). If it is non-NULL and - * rustsecp256k1zkp_v0_4_0_nonce_function_bip340 is used, then ndata must be a - * pointer to 32-byte auxiliary randomness as per BIP-340. + * aux_rand32: 32 bytes of fresh randomness. While recommended to provide + * this, it is only supplemental to security and can be NULL. See + * BIP-340 "Default Signing" for a full explanation of this + * argument and for guidance if randomness is expensive. 
*/ SECP256K1_API int rustsecp256k1zkp_v0_4_0_schnorrsig_sign( const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1zkp_v0_4_0_keypair *keypair, - rustsecp256k1zkp_v0_4_0_nonce_function_hardened noncefp, - void *ndata + unsigned char *aux_rand32 ) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +/** Create a Schnorr signature with a more flexible API. + * + * Same arguments as rustsecp256k1zkp_v0_4_0_schnorrsig_sign except that it allows signing + * variable length messages and accepts a pointer to an extraparams object that + * allows customizing signing by passing additional arguments. + * + * Creates the same signatures as schnorrsig_sign if msglen is 32 and the + * extraparams.ndata is the same as aux_rand32. + * + * In: msg: the message being signed. Can only be NULL if msglen is 0. + * msglen: length of the message + * extraparams: pointer to an extraparams object (can be NULL) + */ +SECP256K1_API int rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom( + const rustsecp256k1zkp_v0_4_0_context* ctx, + unsigned char *sig64, + const unsigned char *msg, + size_t msglen, + const rustsecp256k1zkp_v0_4_0_keypair *keypair, + rustsecp256k1zkp_v0_4_0_schnorrsig_extraparams *extraparams +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5); + /** Verify a Schnorr signature. * * Returns: 1: correct signature * 0: incorrect signature * Args: ctx: a secp256k1 context object, initialized for verification. * In: sig64: pointer to the 64-byte signature to verify (cannot be NULL) - * msg32: the 32-byte message being verified (cannot be NULL) + * msg: the message being verified. Can only be NULL if msglen is 0.
+ * msglen: length of the message * pubkey: pointer to an x-only public key to verify with (cannot be NULL) */ SECP256K1_API SECP256K1_WARN_UNUSED_RESULT int rustsecp256k1zkp_v0_4_0_schnorrsig_verify( const rustsecp256k1zkp_v0_4_0_context* ctx, const unsigned char *sig64, - const unsigned char *msg32, + const unsigned char *msg, + size_t msglen, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkey -) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(3) SECP256K1_ARG_NONNULL(4); +) SECP256K1_ARG_NONNULL(1) SECP256K1_ARG_NONNULL(2) SECP256K1_ARG_NONNULL(5); #ifdef __cplusplus } diff --git a/secp256k1-zkp-sys/depend/secp256k1/obj/.gitignore b/secp256k1-zkp-sys/depend/secp256k1/obj/.gitignore deleted file mode 100644 index e69de29b..00000000 diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/basic-config.h b/secp256k1-zkp-sys/depend/secp256k1/src/basic-config.h index bb6b5825..6f7693cb 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/basic-config.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/basic-config.h @@ -9,25 +9,8 @@ #ifdef USE_BASIC_CONFIG -#undef USE_ASM_X86_64 -#undef USE_ECMULT_STATIC_PRECOMPUTATION -#undef USE_EXTERNAL_ASM -#undef USE_EXTERNAL_DEFAULT_CALLBACKS -#undef USE_FIELD_INV_BUILTIN -#undef USE_FIELD_INV_NUM -#undef USE_NUM_GMP -#undef USE_NUM_NONE -#undef USE_SCALAR_INV_BUILTIN -#undef USE_SCALAR_INV_NUM -#undef USE_FORCE_WIDEMUL_INT64 -#undef USE_FORCE_WIDEMUL_INT128 -#undef ECMULT_WINDOW_SIZE - -#define USE_NUM_NONE 1 -#define USE_FIELD_INV_BUILTIN 1 -#define USE_SCALAR_INV_BUILTIN 1 -#define USE_WIDEMUL_64 1 #define ECMULT_WINDOW_SIZE 15 +#define ECMULT_GEN_PREC_BITS 4 #endif /* USE_BASIC_CONFIG */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/bench_ecdh.c b/secp256k1-zkp-sys/depend/secp256k1/src/bench_ecdh.c index 5460b20c..31ef06ed 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/bench_ecdh.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/bench_ecdh.c @@ -6,8 +6,8 @@ #include -#include "include/secp256k1.h" 
-#include "include/secp256k1_ecdh.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_ecdh.h" #include "util.h" #include "bench.h" diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/bench_ecmult.c b/secp256k1-zkp-sys/depend/secp256k1/src/bench_ecmult.c index d13ed720..eb3b5d87 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/bench_ecmult.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/bench_ecmult.c @@ -5,43 +5,187 @@ ***********************************************************************/ #include -#include "include/secp256k1.h" +#include "secp256k1.c" +#include "../include/secp256k1.h" #include "util.h" #include "hash_impl.h" -#include "num_impl.h" #include "field_impl.h" #include "group_impl.h" #include "scalar_impl.h" #include "ecmult_impl.h" #include "bench.h" -#include "secp256k1.c" #define POINTS 32768 +void help(char **argv) { + printf("Benchmark EC multiplication algorithms\n"); + printf("\n"); + printf("Usage: %s \n", argv[0]); + printf("The output shows the number of multiplied and summed points right after the\n"); + printf("function name. 
The letter 'g' indicates that one of the points is the generator.\n"); + printf("The benchmarks are divided by the number of points.\n"); + printf("\n"); + printf("default (ecmult_multi): picks pippenger_wnaf or strauss_wnaf depending on the\n"); + printf(" batch size\n"); + printf("pippenger_wnaf: for all batch sizes\n"); + printf("strauss_wnaf: for all batch sizes\n"); + printf("simple: multiply and sum each point individually\n"); +} + typedef struct { /* Setup once in advance */ rustsecp256k1zkp_v0_4_0_context* ctx; rustsecp256k1zkp_v0_4_0_scratch_space* scratch; rustsecp256k1zkp_v0_4_0_scalar* scalars; rustsecp256k1zkp_v0_4_0_ge* pubkeys; + rustsecp256k1zkp_v0_4_0_gej* pubkeys_gej; rustsecp256k1zkp_v0_4_0_scalar* seckeys; rustsecp256k1zkp_v0_4_0_gej* expected_output; rustsecp256k1zkp_v0_4_0_ecmult_multi_func ecmult_multi; - /* Changes per test */ + /* Changes per benchmark */ size_t count; int includes_g; - /* Changes per test iteration */ + /* Changes per benchmark iteration, used to pick different scalars and pubkeys + * in each run. */ size_t offset1; size_t offset2; - /* Test output. */ + /* Benchmark output. */ rustsecp256k1zkp_v0_4_0_gej* output; } bench_data; -static int bench_callback(rustsecp256k1zkp_v0_4_0_scalar* sc, rustsecp256k1zkp_v0_4_0_ge* ge, size_t idx, void* arg) { +/* Hashes x into [0, POINTS) twice and store the result in offset1 and offset2. 
*/ +static void hash_into_offset(bench_data* data, size_t x) { + data->offset1 = (x * 0x537b7f6f + 0x8f66a481) % POINTS; + data->offset2 = (x * 0x7f6f537b + 0x6a1a8f49) % POINTS; +} + +/* Check correctness of the benchmark by computing + * sum(outputs) ?= (sum(scalars_gen) + sum(seckeys)*sum(scalars))*G */ +static void bench_ecmult_teardown_helper(bench_data* data, size_t* seckey_offset, size_t* scalar_offset, size_t* scalar_gen_offset, int iters) { + int i; + rustsecp256k1zkp_v0_4_0_gej sum_output, tmp; + rustsecp256k1zkp_v0_4_0_scalar sum_scalars; + + rustsecp256k1zkp_v0_4_0_gej_set_infinity(&sum_output); + rustsecp256k1zkp_v0_4_0_scalar_clear(&sum_scalars); + for (i = 0; i < iters; ++i) { + rustsecp256k1zkp_v0_4_0_gej_add_var(&sum_output, &sum_output, &data->output[i], NULL); + if (scalar_gen_offset != NULL) { + rustsecp256k1zkp_v0_4_0_scalar_add(&sum_scalars, &sum_scalars, &data->scalars[(*scalar_gen_offset+i) % POINTS]); + } + if (seckey_offset != NULL) { + rustsecp256k1zkp_v0_4_0_scalar s = data->seckeys[(*seckey_offset+i) % POINTS]; + rustsecp256k1zkp_v0_4_0_scalar_mul(&s, &s, &data->scalars[(*scalar_offset+i) % POINTS]); + rustsecp256k1zkp_v0_4_0_scalar_add(&sum_scalars, &sum_scalars, &s); + } + } + rustsecp256k1zkp_v0_4_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &tmp, &sum_scalars); + rustsecp256k1zkp_v0_4_0_gej_neg(&tmp, &tmp); + rustsecp256k1zkp_v0_4_0_gej_add_var(&tmp, &tmp, &sum_output, NULL); + CHECK(rustsecp256k1zkp_v0_4_0_gej_is_infinity(&tmp)); +} + +static void bench_ecmult_setup(void* arg) { + bench_data* data = (bench_data*)arg; + /* Re-randomize offset to ensure that we're using different scalars and + * group elements in each run. 
*/ + hash_into_offset(data, data->offset1); +} + +static void bench_ecmult_gen(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters; ++i) { + rustsecp256k1zkp_v0_4_0_ecmult_gen(&data->ctx->ecmult_gen_ctx, &data->output[i], &data->scalars[(data->offset1+i) % POINTS]); + } +} + +static void bench_ecmult_gen_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters); +} + +static void bench_ecmult_const(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters; ++i) { + rustsecp256k1zkp_v0_4_0_ecmult_const(&data->output[i], &data->pubkeys[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], 256); + } +} + +static void bench_ecmult_const_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters); +} + +static void bench_ecmult_1(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters; ++i) { + rustsecp256k1zkp_v0_4_0_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], NULL); + } +} + +static void bench_ecmult_1_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, NULL, iters); +} + +static void bench_ecmult_1g(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + rustsecp256k1zkp_v0_4_0_scalar zero; + int i; + + rustsecp256k1zkp_v0_4_0_scalar_set_int(&zero, 0); + for (i = 0; i < iters; ++i) { + rustsecp256k1zkp_v0_4_0_ecmult(&data->ctx->ecmult_ctx, &data->output[i], NULL, &zero, &data->scalars[(data->offset1+i) % POINTS]); + } +} + +static void bench_ecmult_1g_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + 
bench_ecmult_teardown_helper(data, NULL, NULL, &data->offset1, iters); +} + +static void bench_ecmult_2g(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + int i; + + for (i = 0; i < iters/2; ++i) { + rustsecp256k1zkp_v0_4_0_ecmult(&data->ctx->ecmult_ctx, &data->output[i], &data->pubkeys_gej[(data->offset1+i) % POINTS], &data->scalars[(data->offset2+i) % POINTS], &data->scalars[(data->offset1+i) % POINTS]); + } +} + +static void bench_ecmult_2g_teardown(void* arg, int iters) { + bench_data* data = (bench_data*)arg; + bench_ecmult_teardown_helper(data, &data->offset1, &data->offset2, &data->offset1, iters/2); +} + +static void run_ecmult_bench(bench_data* data, int iters) { + char str[32]; + sprintf(str, "ecmult_gen"); + run_benchmark(str, bench_ecmult_gen, bench_ecmult_setup, bench_ecmult_gen_teardown, data, 10, iters); + sprintf(str, "ecmult_const"); + run_benchmark(str, bench_ecmult_const, bench_ecmult_setup, bench_ecmult_const_teardown, data, 10, iters); + /* ecmult with non generator point */ + sprintf(str, "ecmult 1"); + run_benchmark(str, bench_ecmult_1, bench_ecmult_setup, bench_ecmult_1_teardown, data, 10, iters); + /* ecmult with generator point */ + sprintf(str, "ecmult 1g"); + run_benchmark(str, bench_ecmult_1g, bench_ecmult_setup, bench_ecmult_1g_teardown, data, 10, iters); + /* ecmult with generator and non-generator point. The reported time is per point. 
*/ + sprintf(str, "ecmult 2g"); + run_benchmark(str, bench_ecmult_2g, bench_ecmult_setup, bench_ecmult_2g_teardown, data, 10, 2*iters); +} + +static int bench_ecmult_multi_callback(rustsecp256k1zkp_v0_4_0_scalar* sc, rustsecp256k1zkp_v0_4_0_ge* ge, size_t idx, void* arg) { bench_data* data = (bench_data*)arg; if (data->includes_g) ++idx; if (idx == 0) { @@ -54,7 +198,7 @@ static int bench_callback(rustsecp256k1zkp_v0_4_0_scalar* sc, rustsecp256k1zkp_v return 1; } -static void bench_ecmult(void* arg, int iters) { +static void bench_ecmult_multi(void* arg, int iters) { bench_data* data = (bench_data*)arg; int includes_g = data->includes_g; @@ -63,19 +207,18 @@ static void bench_ecmult(void* arg, int iters) { iters = iters / data->count; for (iter = 0; iter < iters; ++iter) { - data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? &data->scalars[data->offset1] : NULL, bench_callback, arg, count - includes_g); + data->ecmult_multi(&data->ctx->error_callback, &data->ctx->ecmult_ctx, data->scratch, &data->output[iter], data->includes_g ? 
&data->scalars[data->offset1] : NULL, bench_ecmult_multi_callback, arg, count - includes_g); data->offset1 = (data->offset1 + count) % POINTS; data->offset2 = (data->offset2 + count - 1) % POINTS; } } -static void bench_ecmult_setup(void* arg) { +static void bench_ecmult_multi_setup(void* arg) { bench_data* data = (bench_data*)arg; - data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS; - data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS; + hash_into_offset(data, data->count); } -static void bench_ecmult_teardown(void* arg, int iters) { +static void bench_ecmult_multi_teardown(void* arg, int iters) { bench_data* data = (bench_data*)arg; int iter; iters = iters / data->count; @@ -89,7 +232,7 @@ static void bench_ecmult_teardown(void* arg, int iters) { static void generate_scalar(uint32_t num, rustsecp256k1zkp_v0_4_0_scalar* scalar) { rustsecp256k1zkp_v0_4_0_sha256 sha256; - unsigned char c[11] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0}; + unsigned char c[10] = {'e', 'c', 'm', 'u', 'l', 't', 0, 0, 0, 0}; unsigned char buf[32]; int overflow = 0; c[6] = num; @@ -103,7 +246,7 @@ static void generate_scalar(uint32_t num, rustsecp256k1zkp_v0_4_0_scalar* scalar CHECK(!overflow); } -static void run_test(bench_data* data, size_t count, int includes_g, int num_iters) { +static void run_ecmult_multi_bench(bench_data* data, size_t count, int includes_g, int num_iters) { char str[32]; static const rustsecp256k1zkp_v0_4_0_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); size_t iters = 1 + num_iters / count; @@ -113,8 +256,7 @@ static void run_test(bench_data* data, size_t count, int includes_g, int num_ite data->includes_g = includes_g; /* Compute (the negation of) the expected results directly. 
*/ - data->offset1 = (data->count * 0x537b7f6f + 0x8f66a481) % POINTS; - data->offset2 = (data->count * 0x7f6f537b + 0x6a1a8f49) % POINTS; + hash_into_offset(data, data->count); for (iter = 0; iter < iters; ++iter) { rustsecp256k1zkp_v0_4_0_scalar tmp; rustsecp256k1zkp_v0_4_0_scalar total = data->scalars[(data->offset1++) % POINTS]; @@ -128,25 +270,26 @@ static void run_test(bench_data* data, size_t count, int includes_g, int num_ite } /* Run the benchmark. */ - sprintf(str, includes_g ? "ecmult_%ig" : "ecmult_%i", (int)count); - run_benchmark(str, bench_ecmult, bench_ecmult_setup, bench_ecmult_teardown, data, 10, count * iters); + sprintf(str, includes_g ? "ecmult_multi %ig" : "ecmult_multi %i", (int)count); + run_benchmark(str, bench_ecmult_multi, bench_ecmult_multi_setup, bench_ecmult_multi_teardown, data, 10, count * iters); } int main(int argc, char **argv) { bench_data data; int i, p; - rustsecp256k1zkp_v0_4_0_gej* pubkeys_gej; size_t scratch_size; int iters = get_iters(10000); - data.ctx = rustsecp256k1zkp_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); - scratch_size = rustsecp256k1zkp_v0_4_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; - data.scratch = rustsecp256k1zkp_v0_4_0_scratch_space_create(data.ctx, scratch_size); data.ecmult_multi = rustsecp256k1zkp_v0_4_0_ecmult_multi_var; if (argc > 1) { - if(have_flag(argc, argv, "pippenger_wnaf")) { + if(have_flag(argc, argv, "-h") + || have_flag(argc, argv, "--help") + || have_flag(argc, argv, "help")) { + help(argv); + return 1; + } else if(have_flag(argc, argv, "pippenger_wnaf")) { printf("Using pippenger_wnaf:\n"); data.ecmult_multi = rustsecp256k1zkp_v0_4_0_ecmult_pippenger_batch_single; } else if(have_flag(argc, argv, "strauss_wnaf")) { @@ -154,39 +297,48 @@ int main(int argc, char **argv) { data.ecmult_multi = rustsecp256k1zkp_v0_4_0_ecmult_strauss_batch_single; } else if(have_flag(argc, argv, "simple")) { printf("Using simple algorithm:\n"); - data.ecmult_multi = 
rustsecp256k1zkp_v0_4_0_ecmult_multi_var; - rustsecp256k1zkp_v0_4_0_scratch_space_destroy(data.ctx, data.scratch); - data.scratch = NULL; } else { - fprintf(stderr, "%s: unrecognized argument '%s'.\n", argv[0], argv[1]); - fprintf(stderr, "Use 'pippenger_wnaf', 'strauss_wnaf', 'simple' or no argument to benchmark a combined algorithm.\n"); + fprintf(stderr, "%s: unrecognized argument '%s'.\n\n", argv[0], argv[1]); + help(argv); return 1; } } + data.ctx = rustsecp256k1zkp_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); + scratch_size = rustsecp256k1zkp_v0_4_0_strauss_scratch_size(POINTS) + STRAUSS_SCRATCH_OBJECTS*16; + if (!have_flag(argc, argv, "simple")) { + data.scratch = rustsecp256k1zkp_v0_4_0_scratch_space_create(data.ctx, scratch_size); + } else { + data.scratch = NULL; + } + /* Allocate stuff */ data.scalars = malloc(sizeof(rustsecp256k1zkp_v0_4_0_scalar) * POINTS); data.seckeys = malloc(sizeof(rustsecp256k1zkp_v0_4_0_scalar) * POINTS); data.pubkeys = malloc(sizeof(rustsecp256k1zkp_v0_4_0_ge) * POINTS); + data.pubkeys_gej = malloc(sizeof(rustsecp256k1zkp_v0_4_0_gej) * POINTS); data.expected_output = malloc(sizeof(rustsecp256k1zkp_v0_4_0_gej) * (iters + 1)); data.output = malloc(sizeof(rustsecp256k1zkp_v0_4_0_gej) * (iters + 1)); /* Generate a set of scalars, and private/public keypairs. 
*/ - pubkeys_gej = malloc(sizeof(rustsecp256k1zkp_v0_4_0_gej) * POINTS); - rustsecp256k1zkp_v0_4_0_gej_set_ge(&pubkeys_gej[0], &rustsecp256k1zkp_v0_4_0_ge_const_g); + rustsecp256k1zkp_v0_4_0_gej_set_ge(&data.pubkeys_gej[0], &rustsecp256k1zkp_v0_4_0_ge_const_g); rustsecp256k1zkp_v0_4_0_scalar_set_int(&data.seckeys[0], 1); for (i = 0; i < POINTS; ++i) { generate_scalar(i, &data.scalars[i]); if (i) { - rustsecp256k1zkp_v0_4_0_gej_double_var(&pubkeys_gej[i], &pubkeys_gej[i - 1], NULL); + rustsecp256k1zkp_v0_4_0_gej_double_var(&data.pubkeys_gej[i], &data.pubkeys_gej[i - 1], NULL); rustsecp256k1zkp_v0_4_0_scalar_add(&data.seckeys[i], &data.seckeys[i - 1], &data.seckeys[i - 1]); } } - rustsecp256k1zkp_v0_4_0_ge_set_all_gej_var(data.pubkeys, pubkeys_gej, POINTS); - free(pubkeys_gej); + rustsecp256k1zkp_v0_4_0_ge_set_all_gej_var(data.pubkeys, data.pubkeys_gej, POINTS); + + + /* Initialize offset1 and offset2 */ + hash_into_offset(&data, 0); + run_ecmult_bench(&data, iters); for (i = 1; i <= 8; ++i) { - run_test(&data, i, 1, iters); + run_ecmult_multi_bench(&data, i, 1, iters); } /* This is disabled with low count of iterations because the loop runs 77 times even with iters=1 @@ -195,7 +347,7 @@ int main(int argc, char **argv) { if (iters > 2) { for (p = 0; p <= 11; ++p) { for (i = 9; i <= 16; ++i) { - run_test(&data, i << p, 1, iters); + run_ecmult_multi_bench(&data, i << p, 1, iters); } } } @@ -206,6 +358,7 @@ int main(int argc, char **argv) { rustsecp256k1zkp_v0_4_0_context_destroy(data.ctx); free(data.scalars); free(data.pubkeys); + free(data.pubkeys_gej); free(data.seckeys); free(data.output); free(data.expected_output); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/bench_internal.c b/secp256k1-zkp-sys/depend/secp256k1/src/bench_internal.c index 001d00bf..a5b125fa 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/bench_internal.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/bench_internal.c @@ -5,19 +5,18 @@ 
***********************************************************************/ #include -#include "include/secp256k1.h" +#include "secp256k1.c" +#include "../include/secp256k1.h" #include "assumptions.h" #include "util.h" #include "hash_impl.h" -#include "num_impl.h" #include "field_impl.h" #include "group_impl.h" #include "scalar_impl.h" #include "ecmult_const_impl.h" #include "ecmult_impl.h" #include "bench.h" -#include "secp256k1.c" typedef struct { rustsecp256k1zkp_v0_4_0_scalar scalar[2]; @@ -99,15 +98,6 @@ void bench_scalar_negate(void* arg, int iters) { } } -void bench_scalar_sqr(void* arg, int iters) { - int i; - bench_inv *data = (bench_inv*)arg; - - for (i = 0; i < iters; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(&data->scalar[0], &data->scalar[0]); - } -} - void bench_scalar_mul(void* arg, int iters) { int i; bench_inv *data = (bench_inv*)arg; @@ -369,35 +359,16 @@ void bench_context_sign(void* arg, int iters) { } } -#ifndef USE_NUM_NONE -void bench_num_jacobi(void* arg, int iters) { - int i, j = 0; - bench_inv *data = (bench_inv*)arg; - rustsecp256k1zkp_v0_4_0_num nx, na, norder; - - rustsecp256k1zkp_v0_4_0_scalar_get_num(&nx, &data->scalar[0]); - rustsecp256k1zkp_v0_4_0_scalar_order_get_num(&norder); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&na, &data->scalar[1]); - - for (i = 0; i < iters; i++) { - j += rustsecp256k1zkp_v0_4_0_num_jacobi(&nx, &norder); - rustsecp256k1zkp_v0_4_0_num_add(&nx, &nx, &na); - } - CHECK(j <= iters); -} -#endif - int main(int argc, char **argv) { bench_inv data; int iters = get_iters(20000); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "add")) run_benchmark("scalar_add", bench_scalar_add, bench_setup, NULL, &data, 10, iters*100); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "negate")) run_benchmark("scalar_negate", bench_scalar_negate, bench_setup, NULL, &data, 10, iters*100); - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "sqr")) run_benchmark("scalar_sqr", bench_scalar_sqr, 
bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "mul")) run_benchmark("scalar_mul", bench_scalar_mul, bench_setup, NULL, &data, 10, iters*10); if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "split")) run_benchmark("scalar_split", bench_scalar_split, bench_setup, NULL, &data, 10, iters); - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, 2000); - if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, 2000); + if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse", bench_scalar_inverse, bench_setup, NULL, &data, 10, iters); + if (have_flag(argc, argv, "scalar") || have_flag(argc, argv, "inverse")) run_benchmark("scalar_inverse_var", bench_scalar_inverse_var, bench_setup, NULL, &data, 10, iters); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize", bench_field_normalize, bench_setup, NULL, &data, 10, iters*100); if (have_flag(argc, argv, "field") || have_flag(argc, argv, "normalize")) run_benchmark("field_normalize_weak", bench_field_normalize_weak, bench_setup, NULL, &data, 10, iters*100); @@ -424,8 +395,5 @@ int main(int argc, char **argv) { if (have_flag(argc, argv, "context") || have_flag(argc, argv, "verify")) run_benchmark("context_verify", bench_context_verify, bench_setup, NULL, &data, 10, 1 + iters/1000); if (have_flag(argc, argv, "context") || have_flag(argc, argv, "sign")) run_benchmark("context_sign", bench_context_sign, bench_setup, NULL, &data, 10, 1 + iters/100); -#ifndef USE_NUM_NONE - if (have_flag(argc, argv, "num") || have_flag(argc, argv, "jacobi")) run_benchmark("num_jacobi", bench_num_jacobi, bench_setup, NULL, &data, 10, iters*10); -#endif return 0; } diff --git 
a/secp256k1-zkp-sys/depend/secp256k1/src/bench_recover.c b/secp256k1-zkp-sys/depend/secp256k1/src/bench_recover.c index 047ca5d7..3ceaca74 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/bench_recover.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/bench_recover.c @@ -4,8 +4,8 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" -#include "include/secp256k1_recovery.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_recovery.h" #include "util.h" #include "bench.h" diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/bench_schnorrsig.c b/secp256k1-zkp-sys/depend/secp256k1/src/bench_schnorrsig.c index 056f7a74..d4fe3a2f 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/bench_schnorrsig.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/bench_schnorrsig.c @@ -8,11 +8,13 @@ #include -#include "include/secp256k1.h" -#include "include/secp256k1_schnorrsig.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_schnorrsig.h" #include "util.h" #include "bench.h" +#define MSGLEN 32 + typedef struct { rustsecp256k1zkp_v0_4_0_context *ctx; int n; @@ -26,13 +28,13 @@ typedef struct { void bench_schnorrsig_sign(void* arg, int iters) { bench_schnorrsig_data *data = (bench_schnorrsig_data *)arg; int i; - unsigned char msg[32] = "benchmarkexamplemessagetemplate"; + unsigned char msg[MSGLEN] = {0}; unsigned char sig[64]; for (i = 0; i < iters; i++) { msg[0] = i; msg[1] = i >> 8; - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(data->ctx, sig, msg, data->keypairs[i], NULL, NULL)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(data->ctx, sig, msg, MSGLEN, data->keypairs[i], NULL)); } } @@ -43,7 +45,7 @@ void bench_schnorrsig_verify(void* arg, int iters) { for (i = 0; i < iters; i++) { rustsecp256k1zkp_v0_4_0_xonly_pubkey pk; CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(data->ctx, &pk, data->pk[i]) == 1); - 
CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], &pk)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(data->ctx, data->sigs[i], data->msgs[i], MSGLEN, &pk)); } } @@ -58,9 +60,10 @@ int main(void) { data.msgs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); data.sigs = (const unsigned char **)malloc(iters * sizeof(unsigned char *)); + CHECK(MSGLEN >= 4); for (i = 0; i < iters; i++) { unsigned char sk[32]; - unsigned char *msg = (unsigned char *)malloc(32); + unsigned char *msg = (unsigned char *)malloc(MSGLEN); unsigned char *sig = (unsigned char *)malloc(64); rustsecp256k1zkp_v0_4_0_keypair *keypair = (rustsecp256k1zkp_v0_4_0_keypair *)malloc(sizeof(*keypair)); unsigned char *pk_char = (unsigned char *)malloc(32); @@ -69,7 +72,7 @@ int main(void) { msg[1] = sk[1] = i >> 8; msg[2] = sk[2] = i >> 16; msg[3] = sk[3] = i >> 24; - memset(&msg[4], 'm', 28); + memset(&msg[4], 'm', MSGLEN - 4); memset(&sk[4], 's', 28); data.keypairs[i] = keypair; @@ -78,7 +81,7 @@ int main(void) { data.sigs[i] = sig; CHECK(rustsecp256k1zkp_v0_4_0_keypair_create(data.ctx, keypair, sk)); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(data.ctx, sig, msg, keypair, NULL, NULL)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(data.ctx, sig, msg, MSGLEN, keypair, NULL)); CHECK(rustsecp256k1zkp_v0_4_0_keypair_xonly_pub(data.ctx, &pk, NULL, keypair)); CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(data.ctx, pk_char, &pk) == 1); } diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/bench_sign.c b/secp256k1-zkp-sys/depend/secp256k1/src/bench_sign.c index 680d9aa7..016ba085 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/bench_sign.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/bench_sign.c @@ -4,7 +4,7 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" +#include "../include/secp256k1.h" 
#include "util.h" #include "bench.h" diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/bench_verify.c b/secp256k1-zkp-sys/depend/secp256k1/src/bench_verify.c index 9ea8dafa..fdd6c7d4 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/bench_verify.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/bench_verify.c @@ -7,7 +7,7 @@ #include #include -#include "include/secp256k1.h" +#include "../include/secp256k1.h" #include "util.h" #include "bench.h" diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/bench_whitelist.c b/secp256k1-zkp-sys/depend/secp256k1/src/bench_whitelist.c index 9fcb7f3f..35a67a69 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/bench_whitelist.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/bench_whitelist.c @@ -11,7 +11,6 @@ #include "util.h" #include "bench.h" #include "hash_impl.h" -#include "num_impl.h" #include "scalar_impl.h" #include "testrand_impl.h" diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/ecdsa_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/ecdsa_impl.h index d8acd534..ea2d9be3 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/ecdsa_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/ecdsa_impl.h @@ -140,7 +140,7 @@ static int rustsecp256k1zkp_v0_4_0_der_parse_integer(rustsecp256k1zkp_v0_4_0_sca overflow = 1; } if (!overflow) { - memcpy(ra + 32 - rlen, *sig, rlen); + if (rlen) memcpy(ra + 32 - rlen, *sig, rlen); rustsecp256k1zkp_v0_4_0_scalar_set_b32(r, ra, &overflow); } if (overflow) { diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/ecmult.h b/secp256k1-zkp-sys/depend/secp256k1/src/ecmult.h index abd187e6..8df2d229 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/ecmult.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/ecmult.h @@ -7,7 +7,6 @@ #ifndef SECP256K1_ECMULT_H #define SECP256K1_ECMULT_H -#include "num.h" #include "group.h" #include "scalar.h" #include "scratch.h" @@ -18,7 +17,6 @@ typedef struct { rustsecp256k1zkp_v0_4_0_ge_storage (*pre_g_128)[]; /* odd multiples of 2^128*generator */ } 
rustsecp256k1zkp_v0_4_0_ecmult_context; -static const size_t SECP256K1_ECMULT_CONTEXT_PREALLOCATED_SIZE; static void rustsecp256k1zkp_v0_4_0_ecmult_context_init(rustsecp256k1zkp_v0_4_0_ecmult_context *ctx); static void rustsecp256k1zkp_v0_4_0_ecmult_context_build(rustsecp256k1zkp_v0_4_0_ecmult_context *ctx, void **prealloc); static void rustsecp256k1zkp_v0_4_0_ecmult_context_finalize_memcpy(rustsecp256k1zkp_v0_4_0_ecmult_context *dst, const rustsecp256k1zkp_v0_4_0_ecmult_context *src); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/ecmult_gen.h b/secp256k1-zkp-sys/depend/secp256k1/src/ecmult_gen.h index 5ebd339c..94a49ae4 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/ecmult_gen.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/ecmult_gen.h @@ -35,7 +35,6 @@ typedef struct { rustsecp256k1zkp_v0_4_0_gej initial; } rustsecp256k1zkp_v0_4_0_ecmult_gen_context; -static const size_t SECP256K1_ECMULT_GEN_CONTEXT_PREALLOCATED_SIZE; static void rustsecp256k1zkp_v0_4_0_ecmult_gen_context_init(rustsecp256k1zkp_v0_4_0_ecmult_gen_context* ctx); static void rustsecp256k1zkp_v0_4_0_ecmult_gen_context_build(rustsecp256k1zkp_v0_4_0_ecmult_gen_context* ctx, void **prealloc); static void rustsecp256k1zkp_v0_4_0_ecmult_gen_context_finalize_memcpy(rustsecp256k1zkp_v0_4_0_ecmult_gen_context *dst, const rustsecp256k1zkp_v0_4_0_ecmult_gen_context* src); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/field.h b/secp256k1-zkp-sys/depend/secp256k1/src/field.h index 34583207..605aae51 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/field.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/field.h @@ -43,13 +43,12 @@ static void rustsecp256k1zkp_v0_4_0_fe_normalize_weak(rustsecp256k1zkp_v0_4_0_fe /** Normalize a field element, without constant-time guarantee. */ static void rustsecp256k1zkp_v0_4_0_fe_normalize_var(rustsecp256k1zkp_v0_4_0_fe *r); -/** Verify whether a field element represents zero i.e. would normalize to a zero value. 
The field - * implementation may optionally normalize the input, but this should not be relied upon. */ -static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(rustsecp256k1zkp_v0_4_0_fe *r); +/** Verify whether a field element represents zero i.e. would normalize to a zero value. */ +static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(const rustsecp256k1zkp_v0_4_0_fe *r); -/** Verify whether a field element represents zero i.e. would normalize to a zero value. The field - * implementation may optionally normalize the input, but this should not be relied upon. */ -static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1zkp_v0_4_0_fe *r); +/** Verify whether a field element represents zero i.e. would normalize to a zero value, + * without constant-time guarantee. */ +static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(const rustsecp256k1zkp_v0_4_0_fe *r); /** Set a field element equal to a small integer. Resulting field element is normalized. */ static void rustsecp256k1zkp_v0_4_0_fe_set_int(rustsecp256k1zkp_v0_4_0_fe *r, int a); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/field_10x26_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/field_10x26_impl.h index 79b4d496..233e4be4 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/field_10x26_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/field_10x26_impl.h @@ -9,6 +9,7 @@ #include "util.h" #include "field.h" +#include "modinv32_impl.h" #ifdef VERIFY static void rustsecp256k1zkp_v0_4_0_fe_verify(const rustsecp256k1zkp_v0_4_0_fe *a) { @@ -181,7 +182,7 @@ static void rustsecp256k1zkp_v0_4_0_fe_normalize_var(rustsecp256k1zkp_v0_4_0_fe #endif } -static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(rustsecp256k1zkp_v0_4_0_fe *r) { +static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(const rustsecp256k1zkp_v0_4_0_fe *r) { uint32_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4], t5 = r->n[5], t6 = r->n[6], t7 = r->n[7], t8 = r->n[8], t9 = r->n[9]; @@ 
-210,7 +211,7 @@ static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(rustsecp256k1zkp_v0_4_0 return (z0 == 0) | (z1 == 0x3FFFFFFUL); } -static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1zkp_v0_4_0_fe *r) { +static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(const rustsecp256k1zkp_v0_4_0_fe *r) { uint32_t t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; uint32_t z0, z1; uint32_t x; @@ -1164,4 +1165,92 @@ static SECP256K1_INLINE void rustsecp256k1zkp_v0_4_0_fe_from_storage(rustsecp256 #endif } +static void rustsecp256k1zkp_v0_4_0_fe_from_signed30(rustsecp256k1zkp_v0_4_0_fe *r, const rustsecp256k1zkp_v0_4_0_modinv32_signed30 *a) { + const uint32_t M26 = UINT32_MAX >> 6; + const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4], + a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8]; + + /* The output from rustsecp256k1zkp_v0_4_0_modinv32{_var} should be normalized to range [0,modulus), and + * have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8). 
+ */ + VERIFY_CHECK(a0 >> 30 == 0); + VERIFY_CHECK(a1 >> 30 == 0); + VERIFY_CHECK(a2 >> 30 == 0); + VERIFY_CHECK(a3 >> 30 == 0); + VERIFY_CHECK(a4 >> 30 == 0); + VERIFY_CHECK(a5 >> 30 == 0); + VERIFY_CHECK(a6 >> 30 == 0); + VERIFY_CHECK(a7 >> 30 == 0); + VERIFY_CHECK(a8 >> 16 == 0); + + r->n[0] = a0 & M26; + r->n[1] = (a0 >> 26 | a1 << 4) & M26; + r->n[2] = (a1 >> 22 | a2 << 8) & M26; + r->n[3] = (a2 >> 18 | a3 << 12) & M26; + r->n[4] = (a3 >> 14 | a4 << 16) & M26; + r->n[5] = (a4 >> 10 | a5 << 20) & M26; + r->n[6] = (a5 >> 6 | a6 << 24) & M26; + r->n[7] = (a6 >> 2 ) & M26; + r->n[8] = (a6 >> 28 | a7 << 2) & M26; + r->n[9] = (a7 >> 24 | a8 << 6); + +#ifdef VERIFY + r->magnitude = 1; + r->normalized = 1; + rustsecp256k1zkp_v0_4_0_fe_verify(r); +#endif +} + +static void rustsecp256k1zkp_v0_4_0_fe_to_signed30(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *r, const rustsecp256k1zkp_v0_4_0_fe *a) { + const uint32_t M30 = UINT32_MAX >> 2; + const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4], + a5 = a->n[5], a6 = a->n[6], a7 = a->n[7], a8 = a->n[8], a9 = a->n[9]; + +#ifdef VERIFY + VERIFY_CHECK(a->normalized); +#endif + + r->v[0] = (a0 | a1 << 26) & M30; + r->v[1] = (a1 >> 4 | a2 << 22) & M30; + r->v[2] = (a2 >> 8 | a3 << 18) & M30; + r->v[3] = (a3 >> 12 | a4 << 14) & M30; + r->v[4] = (a4 >> 16 | a5 << 10) & M30; + r->v[5] = (a5 >> 20 | a6 << 6) & M30; + r->v[6] = (a6 >> 24 | a7 << 2 + | a8 << 28) & M30; + r->v[7] = (a8 >> 2 | a9 << 24) & M30; + r->v[8] = a9 >> 6; +} + +static const rustsecp256k1zkp_v0_4_0_modinv32_modinfo rustsecp256k1zkp_v0_4_0_const_modinfo_fe = { + {{-0x3D1, -4, 0, 0, 0, 0, 0, 0, 65536}}, + 0x2DDACACFL +}; + +static void rustsecp256k1zkp_v0_4_0_fe_inv(rustsecp256k1zkp_v0_4_0_fe *r, const rustsecp256k1zkp_v0_4_0_fe *x) { + rustsecp256k1zkp_v0_4_0_fe tmp; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 s; + + tmp = *x; + rustsecp256k1zkp_v0_4_0_fe_normalize(&tmp); + rustsecp256k1zkp_v0_4_0_fe_to_signed30(&s, &tmp); + 
rustsecp256k1zkp_v0_4_0_modinv32(&s, &rustsecp256k1zkp_v0_4_0_const_modinfo_fe); + rustsecp256k1zkp_v0_4_0_fe_from_signed30(r, &s); + + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(r) == rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(&tmp)); +} + +static void rustsecp256k1zkp_v0_4_0_fe_inv_var(rustsecp256k1zkp_v0_4_0_fe *r, const rustsecp256k1zkp_v0_4_0_fe *x) { + rustsecp256k1zkp_v0_4_0_fe tmp; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 s; + + tmp = *x; + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&tmp); + rustsecp256k1zkp_v0_4_0_fe_to_signed30(&s, &tmp); + rustsecp256k1zkp_v0_4_0_modinv32_var(&s, &rustsecp256k1zkp_v0_4_0_const_modinfo_fe); + rustsecp256k1zkp_v0_4_0_fe_from_signed30(r, &s); + + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(r) == rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(&tmp)); +} + #endif /* SECP256K1_FIELD_REPR_IMPL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/field_5x52_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/field_5x52_impl.h index 1d806583..a0f345e7 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/field_5x52_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/field_5x52_impl.h @@ -13,6 +13,7 @@ #include "util.h" #include "field.h" +#include "modinv64_impl.h" #if defined(USE_ASM_X86_64) #include "field_5x52_asm_impl.h" @@ -161,7 +162,7 @@ static void rustsecp256k1zkp_v0_4_0_fe_normalize_var(rustsecp256k1zkp_v0_4_0_fe #endif } -static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(rustsecp256k1zkp_v0_4_0_fe *r) { +static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(const rustsecp256k1zkp_v0_4_0_fe *r) { uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4]; /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */ @@ -184,7 +185,7 @@ static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(rustsecp256k1zkp_v0_4_0 return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL); } -static int 
rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(rustsecp256k1zkp_v0_4_0_fe *r) { +static int rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(const rustsecp256k1zkp_v0_4_0_fe *r) { uint64_t t0, t1, t2, t3, t4; uint64_t z0, z1; uint64_t x; @@ -498,4 +499,80 @@ static SECP256K1_INLINE void rustsecp256k1zkp_v0_4_0_fe_from_storage(rustsecp256 #endif } +static void rustsecp256k1zkp_v0_4_0_fe_from_signed62(rustsecp256k1zkp_v0_4_0_fe *r, const rustsecp256k1zkp_v0_4_0_modinv64_signed62 *a) { + const uint64_t M52 = UINT64_MAX >> 12; + const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4]; + + /* The output from rustsecp256k1zkp_v0_4_0_modinv64{_var} should be normalized to range [0,modulus), and + * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4). + */ + VERIFY_CHECK(a0 >> 62 == 0); + VERIFY_CHECK(a1 >> 62 == 0); + VERIFY_CHECK(a2 >> 62 == 0); + VERIFY_CHECK(a3 >> 62 == 0); + VERIFY_CHECK(a4 >> 8 == 0); + + r->n[0] = a0 & M52; + r->n[1] = (a0 >> 52 | a1 << 10) & M52; + r->n[2] = (a1 >> 42 | a2 << 20) & M52; + r->n[3] = (a2 >> 32 | a3 << 30) & M52; + r->n[4] = (a3 >> 22 | a4 << 40); + +#ifdef VERIFY + r->magnitude = 1; + r->normalized = 1; + rustsecp256k1zkp_v0_4_0_fe_verify(r); +#endif +} + +static void rustsecp256k1zkp_v0_4_0_fe_to_signed62(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *r, const rustsecp256k1zkp_v0_4_0_fe *a) { + const uint64_t M62 = UINT64_MAX >> 2; + const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4]; + +#ifdef VERIFY + VERIFY_CHECK(a->normalized); +#endif + + r->v[0] = (a0 | a1 << 52) & M62; + r->v[1] = (a1 >> 10 | a2 << 42) & M62; + r->v[2] = (a2 >> 20 | a3 << 32) & M62; + r->v[3] = (a3 >> 30 | a4 << 22) & M62; + r->v[4] = a4 >> 40; +} + +static const rustsecp256k1zkp_v0_4_0_modinv64_modinfo rustsecp256k1zkp_v0_4_0_const_modinfo_fe = { + {{-0x1000003D1LL, 0, 0, 0, 256}}, + 0x27C7F6E22DDACACFLL +}; + +static void 
rustsecp256k1zkp_v0_4_0_fe_inv(rustsecp256k1zkp_v0_4_0_fe *r, const rustsecp256k1zkp_v0_4_0_fe *x) { + rustsecp256k1zkp_v0_4_0_fe tmp; + rustsecp256k1zkp_v0_4_0_modinv64_signed62 s; + + tmp = *x; + rustsecp256k1zkp_v0_4_0_fe_normalize(&tmp); + rustsecp256k1zkp_v0_4_0_fe_to_signed62(&s, &tmp); + rustsecp256k1zkp_v0_4_0_modinv64(&s, &rustsecp256k1zkp_v0_4_0_const_modinfo_fe); + rustsecp256k1zkp_v0_4_0_fe_from_signed62(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(r) == rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(&tmp)); +#endif +} + +static void rustsecp256k1zkp_v0_4_0_fe_inv_var(rustsecp256k1zkp_v0_4_0_fe *r, const rustsecp256k1zkp_v0_4_0_fe *x) { + rustsecp256k1zkp_v0_4_0_fe tmp; + rustsecp256k1zkp_v0_4_0_modinv64_signed62 s; + + tmp = *x; + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&tmp); + rustsecp256k1zkp_v0_4_0_fe_to_signed62(&s, &tmp); + rustsecp256k1zkp_v0_4_0_modinv64_var(&s, &rustsecp256k1zkp_v0_4_0_const_modinfo_fe); + rustsecp256k1zkp_v0_4_0_fe_from_signed62(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(r) == rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(&tmp)); +#endif +} + #endif /* SECP256K1_FIELD_REPR_IMPL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/field_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/field_impl.h index 4bfdc969..6ab59d40 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/field_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/field_impl.h @@ -12,7 +12,6 @@ #endif #include "util.h" -#include "num.h" #if defined(SECP256K1_WIDEMUL_INT128) #include "field_5x52_impl.h" @@ -136,156 +135,9 @@ static int rustsecp256k1zkp_v0_4_0_fe_sqrt(rustsecp256k1zkp_v0_4_0_fe *r, const return rustsecp256k1zkp_v0_4_0_fe_equal(&t1, a); } -static void rustsecp256k1zkp_v0_4_0_fe_inv(rustsecp256k1zkp_v0_4_0_fe *r, const rustsecp256k1zkp_v0_4_0_fe *a) { - rustsecp256k1zkp_v0_4_0_fe x2, x3, x6, x9, x11, x22, x44, x88, x176, x220, x223, t1; - int j; - - /** The 
binary representation of (p - 2) has 5 blocks of 1s, with lengths in - * { 1, 2, 22, 223 }. Use an addition chain to calculate 2^n - 1 for each block: - * [1], [2], 3, 6, 9, 11, [22], 44, 88, 176, 220, [223] - */ - - rustsecp256k1zkp_v0_4_0_fe_sqr(&x2, a); - rustsecp256k1zkp_v0_4_0_fe_mul(&x2, &x2, a); - - rustsecp256k1zkp_v0_4_0_fe_sqr(&x3, &x2); - rustsecp256k1zkp_v0_4_0_fe_mul(&x3, &x3, a); - - x6 = x3; - for (j=0; j<3; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&x6, &x6); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&x6, &x6, &x3); - - x9 = x6; - for (j=0; j<3; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&x9, &x9); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&x9, &x9, &x3); - - x11 = x9; - for (j=0; j<2; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&x11, &x11); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&x11, &x11, &x2); - - x22 = x11; - for (j=0; j<11; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&x22, &x22); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&x22, &x22, &x11); - - x44 = x22; - for (j=0; j<22; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&x44, &x44); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&x44, &x44, &x22); - - x88 = x44; - for (j=0; j<44; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&x88, &x88); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&x88, &x88, &x44); - - x176 = x88; - for (j=0; j<88; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&x176, &x176); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&x176, &x176, &x88); - - x220 = x176; - for (j=0; j<44; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&x220, &x220); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&x220, &x220, &x44); - - x223 = x220; - for (j=0; j<3; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&x223, &x223); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&x223, &x223, &x3); - - /* The final result is then assembled using a sliding window over the blocks. 
*/ - - t1 = x223; - for (j=0; j<23; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&t1, &t1); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&t1, &t1, &x22); - for (j=0; j<5; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&t1, &t1); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&t1, &t1, a); - for (j=0; j<3; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&t1, &t1); - } - rustsecp256k1zkp_v0_4_0_fe_mul(&t1, &t1, &x2); - for (j=0; j<2; j++) { - rustsecp256k1zkp_v0_4_0_fe_sqr(&t1, &t1); - } - rustsecp256k1zkp_v0_4_0_fe_mul(r, a, &t1); -} - -static void rustsecp256k1zkp_v0_4_0_fe_inv_var(rustsecp256k1zkp_v0_4_0_fe *r, const rustsecp256k1zkp_v0_4_0_fe *a) { -#if defined(USE_FIELD_INV_BUILTIN) - rustsecp256k1zkp_v0_4_0_fe_inv(r, a); -#elif defined(USE_FIELD_INV_NUM) - rustsecp256k1zkp_v0_4_0_num n, m; - static const rustsecp256k1zkp_v0_4_0_fe negone = SECP256K1_FE_CONST( - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, - 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL, 0xFFFFFC2EUL - ); - /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ - static const unsigned char prime[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F - }; - unsigned char b[32]; - int res; - rustsecp256k1zkp_v0_4_0_fe c = *a; - rustsecp256k1zkp_v0_4_0_fe_normalize_var(&c); - rustsecp256k1zkp_v0_4_0_fe_get_b32(b, &c); - rustsecp256k1zkp_v0_4_0_num_set_bin(&n, b, 32); - rustsecp256k1zkp_v0_4_0_num_set_bin(&m, prime, 32); - rustsecp256k1zkp_v0_4_0_num_mod_inverse(&n, &n, &m); - rustsecp256k1zkp_v0_4_0_num_get_bin(b, 32, &n); - res = rustsecp256k1zkp_v0_4_0_fe_set_b32(r, b); - (void)res; - VERIFY_CHECK(res); - /* Verify the result is the (unique) valid inverse using non-GMP code. 
*/ - rustsecp256k1zkp_v0_4_0_fe_mul(&c, &c, r); - rustsecp256k1zkp_v0_4_0_fe_add(&c, &negone); - CHECK(rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(&c)); -#else -#error "Please select field inverse implementation" -#endif -} - static int rustsecp256k1zkp_v0_4_0_fe_is_quad_var(const rustsecp256k1zkp_v0_4_0_fe *a) { -#ifndef USE_NUM_NONE - unsigned char b[32]; - rustsecp256k1zkp_v0_4_0_num n; - rustsecp256k1zkp_v0_4_0_num m; - /* secp256k1 field prime, value p defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */ - static const unsigned char prime[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFE,0xFF,0xFF,0xFC,0x2F - }; - - rustsecp256k1zkp_v0_4_0_fe c = *a; - rustsecp256k1zkp_v0_4_0_fe_normalize_var(&c); - rustsecp256k1zkp_v0_4_0_fe_get_b32(b, &c); - rustsecp256k1zkp_v0_4_0_num_set_bin(&n, b, 32); - rustsecp256k1zkp_v0_4_0_num_set_bin(&m, prime, 32); - return rustsecp256k1zkp_v0_4_0_num_jacobi(&n, &m) >= 0; -#else rustsecp256k1zkp_v0_4_0_fe r; return rustsecp256k1zkp_v0_4_0_fe_sqrt(&r, a); -#endif } static const rustsecp256k1zkp_v0_4_0_fe rustsecp256k1zkp_v0_4_0_fe_one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/gen_context.c b/secp256k1-zkp-sys/depend/secp256k1/src/gen_context.c index 6bcacded..02846fe0 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/gen_context.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/gen_context.c @@ -9,10 +9,17 @@ #if !defined(ECMULT_GEN_PREC_BITS) #include "libsecp256k1-config.h" #endif -#define USE_BASIC_CONFIG 1 -#include "basic-config.h" -#include "include/secp256k1.h" +/* We can't require the precomputed tables when creating them. */ +#undef USE_ECMULT_STATIC_PRECOMPUTATION + +/* In principle we could use ASM, but this yields only a minor speedup in + build time and it's very complicated. 
In particular when cross-compiling, we'd + need to build the ASM for the build and the host machine. */ +#undef USE_EXTERNAL_ASM +#undef USE_ASM_X86_64 + +#include "../include/secp256k1.h" #include "assumptions.h" #include "util.h" #include "field_impl.h" diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/group.h b/secp256k1-zkp-sys/depend/secp256k1/src/group.h index f91a123d..d87b54b5 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/group.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/group.h @@ -7,7 +7,6 @@ #ifndef SECP256K1_GROUP_H #define SECP256K1_GROUP_H -#include "num.h" #include "field.h" /** A group element of the secp256k1 curve, in affine coordinates. */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/group_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/group_impl.h index e6f1b521..959009c4 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/group_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/group_impl.h @@ -7,7 +7,6 @@ #ifndef SECP256K1_GROUP_IMPL_H #define SECP256K1_GROUP_IMPL_H -#include "num.h" #include "field.h" #include "group.h" @@ -101,8 +100,8 @@ static void rustsecp256k1zkp_v0_4_0_ge_set_gej(rustsecp256k1zkp_v0_4_0_ge *r, ru static void rustsecp256k1zkp_v0_4_0_ge_set_gej_var(rustsecp256k1zkp_v0_4_0_ge *r, rustsecp256k1zkp_v0_4_0_gej *a) { rustsecp256k1zkp_v0_4_0_fe z2, z3; - r->infinity = a->infinity; if (a->infinity) { + rustsecp256k1zkp_v0_4_0_ge_set_infinity(r); return; } rustsecp256k1zkp_v0_4_0_fe_inv_var(&a->z, &a->z); @@ -111,8 +110,7 @@ static void rustsecp256k1zkp_v0_4_0_ge_set_gej_var(rustsecp256k1zkp_v0_4_0_ge *r rustsecp256k1zkp_v0_4_0_fe_mul(&a->x, &a->x, &z2); rustsecp256k1zkp_v0_4_0_fe_mul(&a->y, &a->y, &z3); rustsecp256k1zkp_v0_4_0_fe_set_int(&a->z, 1); - r->x = a->x; - r->y = a->y; + rustsecp256k1zkp_v0_4_0_ge_set_xy(r, &a->x, &a->y); } static void rustsecp256k1zkp_v0_4_0_ge_set_all_gej_var(rustsecp256k1zkp_v0_4_0_ge *r, const rustsecp256k1zkp_v0_4_0_gej *a, size_t len) { @@ -121,7 +119,9 @@ static void 
rustsecp256k1zkp_v0_4_0_ge_set_all_gej_var(rustsecp256k1zkp_v0_4_0_g size_t last_i = SIZE_MAX; for (i = 0; i < len; i++) { - if (!a[i].infinity) { + if (a[i].infinity) { + rustsecp256k1zkp_v0_4_0_ge_set_infinity(&r[i]); + } else { /* Use destination's x coordinates as scratch space */ if (last_i == SIZE_MAX) { r[i].x = a[i].z; @@ -149,7 +149,6 @@ static void rustsecp256k1zkp_v0_4_0_ge_set_all_gej_var(rustsecp256k1zkp_v0_4_0_g r[last_i].x = u; for (i = 0; i < len; i++) { - r[i].infinity = a[i].infinity; if (!a[i].infinity) { rustsecp256k1zkp_v0_4_0_ge_set_gej_zinv(&r[i], &a[i], &r[i].x); } @@ -316,7 +315,7 @@ static void rustsecp256k1zkp_v0_4_0_gej_double_var(rustsecp256k1zkp_v0_4_0_gej * * point will be gibberish (z = 0 but infinity = 0). */ if (a->infinity) { - r->infinity = 1; + rustsecp256k1zkp_v0_4_0_gej_set_infinity(r); if (rzr != NULL) { rustsecp256k1zkp_v0_4_0_fe_set_int(rzr, 1); } diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modinv32.h b/secp256k1-zkp-sys/depend/secp256k1/src/modinv32.h new file mode 100644 index 00000000..b1b07ef1 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modinv32.h @@ -0,0 +1,42 @@ +/*********************************************************************** + * Copyright (c) 2020 Peter Dettman * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODINV32_H +#define SECP256K1_MODINV32_H + +#if defined HAVE_CONFIG_H +#include "libsecp256k1-config.h" +#endif + +#include "util.h" + +/* A signed 30-bit limb representation of integers. + * + * Its value is sum(v[i] * 2^(30*i), i=0..8). */ +typedef struct { + int32_t v[9]; +} rustsecp256k1zkp_v0_4_0_modinv32_signed30; + +typedef struct { + /* The modulus in signed30 notation, must be odd and in [3, 2^256]. 
*/ + rustsecp256k1zkp_v0_4_0_modinv32_signed30 modulus; + + /* modulus^{-1} mod 2^30 */ + uint32_t modulus_inv30; +} rustsecp256k1zkp_v0_4_0_modinv32_modinfo; + +/* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus). + * If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of + * x and modulus must be 1). These rules are automatically satisfied if the modulus is prime. + * + * On output, all of x's limbs will be in [0, 2^30). + */ +static void rustsecp256k1zkp_v0_4_0_modinv32_var(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *x, const rustsecp256k1zkp_v0_4_0_modinv32_modinfo *modinfo); + +/* Same as rustsecp256k1zkp_v0_4_0_modinv32_var, but constant time in x (not in the modulus). */ +static void rustsecp256k1zkp_v0_4_0_modinv32(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *x, const rustsecp256k1zkp_v0_4_0_modinv32_modinfo *modinfo); + +#endif /* SECP256K1_MODINV32_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modinv32_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modinv32_impl.h new file mode 100644 index 00000000..d3f3b9a7 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modinv32_impl.h @@ -0,0 +1,587 @@ +/*********************************************************************** + * Copyright (c) 2020 Peter Dettman * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODINV32_IMPL_H +#define SECP256K1_MODINV32_IMPL_H + +#include "modinv32.h" + +#include "util.h" + +#include + +/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. + * + * For an explanation of the algorithm, see doc/safegcd_implementation.md. 
This file contains an + * implementation for N=30, using 30-bit signed limbs represented as int32_t. + */ + +#ifdef VERIFY +static const rustsecp256k1zkp_v0_4_0_modinv32_signed30 SECP256K1_SIGNED30_ONE = {{1}}; + +/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^30). */ +static void rustsecp256k1zkp_v0_4_0_modinv32_mul_30(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *r, const rustsecp256k1zkp_v0_4_0_modinv32_signed30 *a, int alen, int32_t factor) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + int64_t c = 0; + int i; + for (i = 0; i < 8; ++i) { + if (i < alen) c += (int64_t)a->v[i] * factor; + r->v[i] = (int32_t)c & M30; c >>= 30; + } + if (8 < alen) c += (int64_t)a->v[8] * factor; + VERIFY_CHECK(c == (int32_t)c); + r->v[8] = (int32_t)c; +} + +/* Return -1 for ab*factor. A consists of alen limbs; b has 9. */ +static int rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(const rustsecp256k1zkp_v0_4_0_modinv32_signed30 *a, int alen, const rustsecp256k1zkp_v0_4_0_modinv32_signed30 *b, int32_t factor) { + int i; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 am, bm; + rustsecp256k1zkp_v0_4_0_modinv32_mul_30(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + rustsecp256k1zkp_v0_4_0_modinv32_mul_30(&bm, b, 9, factor); + for (i = 0; i < 8; ++i) { + /* Verify that all but the top limb of a and b are normalized. */ + VERIFY_CHECK(am.v[i] >> 30 == 0); + VERIFY_CHECK(bm.v[i] >> 30 == 0); + } + for (i = 8; i >= 0; --i) { + if (am.v[i] < bm.v[i]) return -1; + if (am.v[i] > bm.v[i]) return 1; + } + return 0; +} +#endif + +/* Take as input a signed30 number in range (-2*modulus,modulus), and add a multiple of the modulus + * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the + * process. The input must have limbs in range (-2^30,2^30). The output will have limbs in range + * [0,2^30). 
*/ +static void rustsecp256k1zkp_v0_4_0_modinv32_normalize_30(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *r, int32_t sign, const rustsecp256k1zkp_v0_4_0_modinv32_modinfo *modinfo) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + int32_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4], + r5 = r->v[5], r6 = r->v[6], r7 = r->v[7], r8 = r->v[8]; + int32_t cond_add, cond_negate; + +#ifdef VERIFY + /* Verify that all limbs are in range (-2^30,2^30). */ + int i; + for (i = 0; i < 9; ++i) { + VERIFY_CHECK(r->v[i] >= -M30); + VERIFY_CHECK(r->v[i] <= M30); + } + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ +#endif + + /* In a first step, add the modulus if the input is negative, and then negate if requested. + * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input + * limbs are in range (-2^30,2^30), this cannot overflow an int32_t. Note that the right + * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is + * indeed the behavior of the right shift operator). 
*/ + cond_add = r8 >> 31; + r0 += modinfo->modulus.v[0] & cond_add; + r1 += modinfo->modulus.v[1] & cond_add; + r2 += modinfo->modulus.v[2] & cond_add; + r3 += modinfo->modulus.v[3] & cond_add; + r4 += modinfo->modulus.v[4] & cond_add; + r5 += modinfo->modulus.v[5] & cond_add; + r6 += modinfo->modulus.v[6] & cond_add; + r7 += modinfo->modulus.v[7] & cond_add; + r8 += modinfo->modulus.v[8] & cond_add; + cond_negate = sign >> 31; + r0 = (r0 ^ cond_negate) - cond_negate; + r1 = (r1 ^ cond_negate) - cond_negate; + r2 = (r2 ^ cond_negate) - cond_negate; + r3 = (r3 ^ cond_negate) - cond_negate; + r4 = (r4 ^ cond_negate) - cond_negate; + r5 = (r5 ^ cond_negate) - cond_negate; + r6 = (r6 ^ cond_negate) - cond_negate; + r7 = (r7 ^ cond_negate) - cond_negate; + r8 = (r8 ^ cond_negate) - cond_negate; + /* Propagate the top bits, to bring limbs back to range (-2^30,2^30). */ + r1 += r0 >> 30; r0 &= M30; + r2 += r1 >> 30; r1 &= M30; + r3 += r2 >> 30; r2 &= M30; + r4 += r3 >> 30; r3 &= M30; + r5 += r4 >> 30; r4 &= M30; + r6 += r5 >> 30; r5 &= M30; + r7 += r6 >> 30; r6 &= M30; + r8 += r7 >> 30; r7 &= M30; + + /* In a second step add the modulus again if the result is still negative, bringing r to range + * [0,modulus). */ + cond_add = r8 >> 31; + r0 += modinfo->modulus.v[0] & cond_add; + r1 += modinfo->modulus.v[1] & cond_add; + r2 += modinfo->modulus.v[2] & cond_add; + r3 += modinfo->modulus.v[3] & cond_add; + r4 += modinfo->modulus.v[4] & cond_add; + r5 += modinfo->modulus.v[5] & cond_add; + r6 += modinfo->modulus.v[6] & cond_add; + r7 += modinfo->modulus.v[7] & cond_add; + r8 += modinfo->modulus.v[8] & cond_add; + /* And propagate again. 
*/ + r1 += r0 >> 30; r0 &= M30; + r2 += r1 >> 30; r1 &= M30; + r3 += r2 >> 30; r2 &= M30; + r4 += r3 >> 30; r3 &= M30; + r5 += r4 >> 30; r4 &= M30; + r6 += r5 >> 30; r5 &= M30; + r7 += r6 >> 30; r6 &= M30; + r8 += r7 >> 30; r7 &= M30; + + r->v[0] = r0; + r->v[1] = r1; + r->v[2] = r2; + r->v[3] = r3; + r->v[4] = r4; + r->v[5] = r5; + r->v[6] = r6; + r->v[7] = r7; + r->v[8] = r8; + +#ifdef VERIFY + VERIFY_CHECK(r0 >> 30 == 0); + VERIFY_CHECK(r1 >> 30 == 0); + VERIFY_CHECK(r2 >> 30 == 0); + VERIFY_CHECK(r3 >> 30 == 0); + VERIFY_CHECK(r4 >> 30 == 0); + VERIFY_CHECK(r5 >> 30 == 0); + VERIFY_CHECK(r6 >> 30 == 0); + VERIFY_CHECK(r7 >> 30 == 0); + VERIFY_CHECK(r8 >> 30 == 0); + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 0) >= 0); /* r >= 0 */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(r, 9, &modinfo->modulus, 1) < 0); /* r < modulus */ +#endif +} + +/* Data type for transition matrices (see section 3 of explanation). + * + * t = [ u v ] + * [ q r ] + */ +typedef struct { + int32_t u, v, q, r; +} rustsecp256k1zkp_v0_4_0_modinv32_trans2x2; + +/* Compute the transition matrix and zeta for 30 divsteps. + * + * Input: zeta: initial zeta + * f0: bottom limb of initial f + * g0: bottom limb of initial g + * Output: t: transition matrix + * Return: final zeta + * + * Implements the divsteps_n_matrix function from the explanation. + */ +static int32_t rustsecp256k1zkp_v0_4_0_modinv32_divsteps_30(int32_t zeta, uint32_t f0, uint32_t g0, rustsecp256k1zkp_v0_4_0_modinv32_trans2x2 *t) { + /* u,v,q,r are the elements of the transformation matrix being built up, + * starting with the identity matrix. Semantically they are signed integers + * in range [-2^30,2^30], but here represented as unsigned mod 2^32. This + * permits left shifting (which is UB for negative numbers). The range + * being inside [-2^31,2^31) means that casting to signed works correctly. 
+ */ + uint32_t u = 1, v = 0, q = 0, r = 1; + uint32_t c1, c2, f = f0, g = g0, x, y, z; + int i; + + for (i = 0; i < 30; ++i) { + VERIFY_CHECK((f & 1) == 1); /* f must always be odd */ + VERIFY_CHECK((u * f0 + v * g0) == f << i); + VERIFY_CHECK((q * f0 + r * g0) == g << i); + /* Compute conditional masks for (zeta < 0) and for (g & 1). */ + c1 = zeta >> 31; + c2 = -(g & 1); + /* Compute x,y,z, conditionally negated versions of f,u,v. */ + x = (f ^ c1) - c1; + y = (u ^ c1) - c1; + z = (v ^ c1) - c1; + /* Conditionally add x,y,z to g,q,r. */ + g += x & c2; + q += y & c2; + r += z & c2; + /* In what follows, c1 is a condition mask for (zeta < 0) and (g & 1). */ + c1 &= c2; + /* Conditionally change zeta into -zeta-2 or zeta-1. */ + zeta = (zeta ^ c1) - 1; + /* Conditionally add g,q,r to f,u,v. */ + f += g & c1; + u += q & c1; + v += r & c1; + /* Shifts */ + g >>= 1; + u <<= 1; + v <<= 1; + /* Bounds on zeta that follow from the bounds on iteration count (max 20*30 divsteps). */ + VERIFY_CHECK(zeta >= -601 && zeta <= 601); + } + /* Return data in t and return value. */ + t->u = (int32_t)u; + t->v = (int32_t)v; + t->q = (int32_t)q; + t->r = (int32_t)r; + /* The determinant of t must be a power of two. This guarantees that multiplication with t + * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which + * will be divided out again). As each divstep's individual matrix has determinant 2, the + * aggregate of 30 of them will have determinant 2^30. */ + VERIFY_CHECK((int64_t)t->u * t->r - (int64_t)t->v * t->q == ((int64_t)1) << 30); + return zeta; +} + +/* Compute the transition matrix and eta for 30 divsteps (variable time). + * + * Input: eta: initial eta + * f0: bottom limb of initial f + * g0: bottom limb of initial g + * Output: t: transition matrix + * Return: final eta + * + * Implements the divsteps_n_matrix_var function from the explanation. 
+ */ +static int32_t rustsecp256k1zkp_v0_4_0_modinv32_divsteps_30_var(int32_t eta, uint32_t f0, uint32_t g0, rustsecp256k1zkp_v0_4_0_modinv32_trans2x2 *t) { + /* inv256[i] = -(2*i+1)^-1 (mod 256) */ + static const uint8_t inv256[128] = { + 0xFF, 0x55, 0x33, 0x49, 0xC7, 0x5D, 0x3B, 0x11, 0x0F, 0xE5, 0xC3, 0x59, + 0xD7, 0xED, 0xCB, 0x21, 0x1F, 0x75, 0x53, 0x69, 0xE7, 0x7D, 0x5B, 0x31, + 0x2F, 0x05, 0xE3, 0x79, 0xF7, 0x0D, 0xEB, 0x41, 0x3F, 0x95, 0x73, 0x89, + 0x07, 0x9D, 0x7B, 0x51, 0x4F, 0x25, 0x03, 0x99, 0x17, 0x2D, 0x0B, 0x61, + 0x5F, 0xB5, 0x93, 0xA9, 0x27, 0xBD, 0x9B, 0x71, 0x6F, 0x45, 0x23, 0xB9, + 0x37, 0x4D, 0x2B, 0x81, 0x7F, 0xD5, 0xB3, 0xC9, 0x47, 0xDD, 0xBB, 0x91, + 0x8F, 0x65, 0x43, 0xD9, 0x57, 0x6D, 0x4B, 0xA1, 0x9F, 0xF5, 0xD3, 0xE9, + 0x67, 0xFD, 0xDB, 0xB1, 0xAF, 0x85, 0x63, 0xF9, 0x77, 0x8D, 0x6B, 0xC1, + 0xBF, 0x15, 0xF3, 0x09, 0x87, 0x1D, 0xFB, 0xD1, 0xCF, 0xA5, 0x83, 0x19, + 0x97, 0xAD, 0x8B, 0xE1, 0xDF, 0x35, 0x13, 0x29, 0xA7, 0x3D, 0x1B, 0xF1, + 0xEF, 0xC5, 0xA3, 0x39, 0xB7, 0xCD, 0xAB, 0x01 + }; + + /* Transformation matrix; see comments in rustsecp256k1zkp_v0_4_0_modinv32_divsteps_30. */ + uint32_t u = 1, v = 0, q = 0, r = 1; + uint32_t f = f0, g = g0, m; + uint16_t w; + int i = 30, limit, zeros; + + for (;;) { + /* Use a sentinel bit to count zeros only up to i. */ + zeros = rustsecp256k1zkp_v0_4_0_ctz32_var(g | (UINT32_MAX << i)); + /* Perform zeros divsteps at once; they all just divide g by two. */ + g >>= zeros; + u <<= zeros; + v <<= zeros; + eta -= zeros; + i -= zeros; + /* We're done once we've done 30 divsteps. */ + if (i == 0) break; + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((g & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << (30 - i)); + VERIFY_CHECK((q * f0 + r * g0) == g << (30 - i)); + /* Bounds on eta that follow from the bounds on iteration count (max 25*30 divsteps). */ + VERIFY_CHECK(eta >= -751 && eta <= 751); + /* If eta is negative, negate it and replace f,g with g,-f. 
*/ + if (eta < 0) { + uint32_t tmp; + eta = -eta; + tmp = f; f = g; g = -tmp; + tmp = u; u = q; q = -tmp; + tmp = v; v = r; r = -tmp; + } + /* eta is now >= 0. In what follows we're going to cancel out the bottom bits of g. No more + * than i can be cancelled out (as we'd be done before that point), and no more than eta+1 + * can be done as its sign will flip once that happens. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + /* m is a mask for the bottom min(limit, 8) bits (our table only supports 8 bits). */ + VERIFY_CHECK(limit > 0 && limit <= 30); + m = (UINT32_MAX >> (32 - limit)) & 255U; + /* Find what multiple of f must be added to g to cancel its bottom min(limit, 8) bits. */ + w = (g * inv256[(f >> 1) & 127]) & m; + /* Do so. */ + g += f * w; + q += u * w; + r += v * w; + VERIFY_CHECK((g & m) == 0); + } + /* Return data in t and return value. */ + t->u = (int32_t)u; + t->v = (int32_t)v; + t->q = (int32_t)q; + t->r = (int32_t)r; + /* The determinant of t must be a power of two. This guarantees that multiplication with t + * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which + * will be divided out again). As each divstep's individual matrix has determinant 2, the + * aggregate of 30 of them will have determinant 2^30. */ + VERIFY_CHECK((int64_t)t->u * t->r - (int64_t)t->v * t->q == ((int64_t)1) << 30); + return eta; +} + +/* Compute (t/2^30) * [d, e] mod modulus, where t is a transition matrix for 30 divsteps. + * + * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range + * (-2^30,2^30). + * + * This implements the update_de function from the explanation. 
+ */ +static void rustsecp256k1zkp_v0_4_0_modinv32_update_de_30(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *d, rustsecp256k1zkp_v0_4_0_modinv32_signed30 *e, const rustsecp256k1zkp_v0_4_0_modinv32_trans2x2 *t, const rustsecp256k1zkp_v0_4_0_modinv32_modinfo* modinfo) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t->u, v = t->v, q = t->q, r = t->r; + int32_t di, ei, md, me, sd, se; + int64_t cd, ce; + int i; +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ + VERIFY_CHECK((labs(u) + labs(v)) >= 0); /* |u|+|v| doesn't overflow */ + VERIFY_CHECK((labs(q) + labs(r)) >= 0); /* |q|+|r| doesn't overflow */ + VERIFY_CHECK((labs(u) + labs(v)) <= M30 + 1); /* |u|+|v| <= 2^30 */ + VERIFY_CHECK((labs(q) + labs(r)) <= M30 + 1); /* |q|+|r| <= 2^30 */ +#endif + /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */ + sd = d->v[8] >> 31; + se = e->v[8] >> 31; + md = (u & sd) + (v & se); + me = (q & sd) + (r & se); + /* Begin computing t*[d,e]. */ + di = d->v[0]; + ei = e->v[0]; + cd = (int64_t)u * di + (int64_t)v * ei; + ce = (int64_t)q * di + (int64_t)r * ei; + /* Correct md,me so that t*[d,e]+modulus*[md,me] has 30 zero bottom bits. */ + md -= (modinfo->modulus_inv30 * (uint32_t)cd + md) & M30; + me -= (modinfo->modulus_inv30 * (uint32_t)ce + me) & M30; + /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. 
*/ + cd += (int64_t)modinfo->modulus.v[0] * md; + ce += (int64_t)modinfo->modulus.v[0] * me; + /* Verify that the low 30 bits of the computation are indeed zero, and then throw them away. */ + VERIFY_CHECK(((int32_t)cd & M30) == 0); cd >>= 30; + VERIFY_CHECK(((int32_t)ce & M30) == 0); ce >>= 30; + /* Now iteratively compute limb i=1..8 of t*[d,e]+modulus*[md,me], and store them in output + * limb i-1 (shifting down by 30 bits). */ + for (i = 1; i < 9; ++i) { + di = d->v[i]; + ei = e->v[i]; + cd += (int64_t)u * di + (int64_t)v * ei; + ce += (int64_t)q * di + (int64_t)r * ei; + cd += (int64_t)modinfo->modulus.v[i] * md; + ce += (int64_t)modinfo->modulus.v[i] * me; + d->v[i - 1] = (int32_t)cd & M30; cd >>= 30; + e->v[i - 1] = (int32_t)ce & M30; ce >>= 30; + } + /* What remains is limb 9 of t*[d,e]+modulus*[md,me]; store it as output limb 8. */ + d->v[8] = (int32_t)cd; + e->v[8] = (int32_t)ce; +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, -2) > 0); /* d > -2*modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(d, 9, &modinfo->modulus, 1) < 0); /* d < modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, -2) > 0); /* e > -2*modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(e, 9, &modinfo->modulus, 1) < 0); /* e < modulus */ +#endif +} + +/* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps. + * + * This implements the update_fg function from the explanation. + */ +static void rustsecp256k1zkp_v0_4_0_modinv32_update_fg_30(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *f, rustsecp256k1zkp_v0_4_0_modinv32_signed30 *g, const rustsecp256k1zkp_v0_4_0_modinv32_trans2x2 *t) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t->u, v = t->v, q = t->q, r = t->r; + int32_t fi, gi; + int64_t cf, cg; + int i; + /* Start computing t*[f,g]. 
*/ + fi = f->v[0]; + gi = g->v[0]; + cf = (int64_t)u * fi + (int64_t)v * gi; + cg = (int64_t)q * fi + (int64_t)r * gi; + /* Verify that the bottom 30 bits of the result are zero, and then throw them away. */ + VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30; + VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30; + /* Now iteratively compute limb i=1..8 of t*[f,g], and store them in output limb i-1 (shifting + * down by 30 bits). */ + for (i = 1; i < 9; ++i) { + fi = f->v[i]; + gi = g->v[i]; + cf += (int64_t)u * fi + (int64_t)v * gi; + cg += (int64_t)q * fi + (int64_t)r * gi; + f->v[i - 1] = (int32_t)cf & M30; cf >>= 30; + g->v[i - 1] = (int32_t)cg & M30; cg >>= 30; + } + /* What remains is limb 9 of t*[f,g]; store it as output limb 8. */ + f->v[8] = (int32_t)cf; + g->v[8] = (int32_t)cg; +} + +/* Compute (t/2^30) * [f, g], where t is a transition matrix for 30 divsteps. + * + * Version that operates on a variable number of limbs in f and g. + * + * This implements the update_fg function from the explanation in modinv64_impl.h. + */ +static void rustsecp256k1zkp_v0_4_0_modinv32_update_fg_30_var(int len, rustsecp256k1zkp_v0_4_0_modinv32_signed30 *f, rustsecp256k1zkp_v0_4_0_modinv32_signed30 *g, const rustsecp256k1zkp_v0_4_0_modinv32_trans2x2 *t) { + const int32_t M30 = (int32_t)(UINT32_MAX >> 2); + const int32_t u = t->u, v = t->v, q = t->q, r = t->r; + int32_t fi, gi; + int64_t cf, cg; + int i; + VERIFY_CHECK(len > 0); + /* Start computing t*[f,g]. */ + fi = f->v[0]; + gi = g->v[0]; + cf = (int64_t)u * fi + (int64_t)v * gi; + cg = (int64_t)q * fi + (int64_t)r * gi; + /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */ + VERIFY_CHECK(((int32_t)cf & M30) == 0); cf >>= 30; + VERIFY_CHECK(((int32_t)cg & M30) == 0); cg >>= 30; + /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting + * down by 30 bits). 
*/ + for (i = 1; i < len; ++i) { + fi = f->v[i]; + gi = g->v[i]; + cf += (int64_t)u * fi + (int64_t)v * gi; + cg += (int64_t)q * fi + (int64_t)r * gi; + f->v[i - 1] = (int32_t)cf & M30; cf >>= 30; + g->v[i - 1] = (int32_t)cg & M30; cg >>= 30; + } + /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */ + f->v[len - 1] = (int32_t)cf; + g->v[len - 1] = (int32_t)cg; +} + +/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */ +static void rustsecp256k1zkp_v0_4_0_modinv32(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *x, const rustsecp256k1zkp_v0_4_0_modinv32_modinfo *modinfo) { + /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */ + rustsecp256k1zkp_v0_4_0_modinv32_signed30 d = {{0}}; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 e = {{1}}; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 f = modinfo->modulus; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 g = *x; + int i; + int32_t zeta = -1; /* zeta = -(delta+1/2); delta is initially 1/2. */ + + /* Do 20 iterations of 30 divsteps each = 600 divsteps. 590 suffices for 256-bit inputs. */ + for (i = 0; i < 20; ++i) { + /* Compute transition matrix and new zeta after 30 divsteps. */ + rustsecp256k1zkp_v0_4_0_modinv32_trans2x2 t; + zeta = rustsecp256k1zkp_v0_4_0_modinv32_divsteps_30(zeta, f.v[0], g.v[0], &t); + /* Update d,e using that transition matrix. */ + rustsecp256k1zkp_v0_4_0_modinv32_update_de_30(&d, &e, &t, modinfo); + /* Update f,g using that transition matrix. 
*/ +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + rustsecp256k1zkp_v0_4_0_modinv32_update_fg_30(&f, &g, &t); +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, 9, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + } + + /* At this point sufficient iterations have been performed that g must have reached 0 + * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g + * values i.e. +/- 1, and d now contains +/- the modular inverse. 
*/ +#ifdef VERIFY + /* g == 0 */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, 9, &SECP256K1_SIGNED30_ONE, 0) == 0); + /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, -1) == 0 || + rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, 9, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, 1) == 0 || + rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, 9, &modinfo->modulus, -1) == 0))); +#endif + + /* Optionally negate d, normalize to [0,modulus), and return it. */ + rustsecp256k1zkp_v0_4_0_modinv32_normalize_30(&d, f.v[8], modinfo); + *x = d; +} + +/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */ +static void rustsecp256k1zkp_v0_4_0_modinv32_var(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *x, const rustsecp256k1zkp_v0_4_0_modinv32_modinfo *modinfo) { + /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */ + rustsecp256k1zkp_v0_4_0_modinv32_signed30 d = {{0, 0, 0, 0, 0, 0, 0, 0, 0}}; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 e = {{1, 0, 0, 0, 0, 0, 0, 0, 0}}; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 f = modinfo->modulus; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 g = *x; +#ifdef VERIFY + int i = 0; +#endif + int j, len = 9; + int32_t eta = -1; /* eta = -delta; delta is initially 1 (faster for the variable-time code) */ + int32_t cond, fn, gn; + + /* Do iterations of 30 divsteps each until g=0. */ + while (1) { + /* Compute transition matrix and new eta after 30 divsteps. */ + rustsecp256k1zkp_v0_4_0_modinv32_trans2x2 t; + eta = rustsecp256k1zkp_v0_4_0_modinv32_divsteps_30_var(eta, f.v[0], g.v[0], &t); + /* Update d,e using that transition matrix. 
*/ + rustsecp256k1zkp_v0_4_0_modinv32_update_de_30(&d, &e, &t, modinfo); + /* Update f,g using that transition matrix. */ +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + rustsecp256k1zkp_v0_4_0_modinv32_update_fg_30_var(len, &f, &g, &t); + /* If the bottom limb of g is 0, there is a chance g=0. */ + if (g.v[0] == 0) { + cond = 0; + /* Check if all other limbs are also 0. */ + for (j = 1; j < len; ++j) { + cond |= g.v[j]; + } + /* If so, we're done. */ + if (cond == 0) break; + } + + /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */ + fn = f.v[len - 1]; + gn = g.v[len - 1]; + cond = ((int32_t)len - 2) >> 31; + cond |= fn ^ (fn >> 31); + cond |= gn ^ (gn >> 31); + /* If so, reduce length, propagating the sign of f and g's top limb into the one below. 
*/ + if (cond == 0) { + f.v[len - 2] |= (uint32_t)fn << 30; + g.v[len - 2] |= (uint32_t)gn << 30; + --len; + } +#ifdef VERIFY + VERIFY_CHECK(++i < 25); /* We should never need more than 25*30 = 750 divsteps */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, len, &modinfo->modulus, 1) < 0); /* g < modulus */ +#endif + } + + /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of + * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */ +#ifdef VERIFY + /* g == 0 */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&g, len, &SECP256K1_SIGNED30_ONE, 0) == 0); + /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, -1) == 0 || + rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, len, &SECP256K1_SIGNED30_ONE, 1) == 0 || + (rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(x, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&d, 9, &SECP256K1_SIGNED30_ONE, 0) == 0 && + (rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, 1) == 0 || + rustsecp256k1zkp_v0_4_0_modinv32_mul_cmp_30(&f, len, &modinfo->modulus, -1) == 0))); +#endif + + /* Optionally negate d, normalize to [0,modulus), and return it. 
*/ + rustsecp256k1zkp_v0_4_0_modinv32_normalize_30(&d, f.v[len - 1], modinfo); + *x = d; +} + +#endif /* SECP256K1_MODINV32_IMPL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modinv64.h b/secp256k1-zkp-sys/depend/secp256k1/src/modinv64.h new file mode 100644 index 00000000..70a43d55 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modinv64.h @@ -0,0 +1,46 @@ +/*********************************************************************** + * Copyright (c) 2020 Peter Dettman * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODINV64_H +#define SECP256K1_MODINV64_H + +#if defined HAVE_CONFIG_H +#include "libsecp256k1-config.h" +#endif + +#include "util.h" + +#ifndef SECP256K1_WIDEMUL_INT128 +#error "modinv64 requires 128-bit wide multiplication support" +#endif + +/* A signed 62-bit limb representation of integers. + * + * Its value is sum(v[i] * 2^(62*i), i=0..4). */ +typedef struct { + int64_t v[5]; +} rustsecp256k1zkp_v0_4_0_modinv64_signed62; + +typedef struct { + /* The modulus in signed62 notation, must be odd and in [3, 2^256]. */ + rustsecp256k1zkp_v0_4_0_modinv64_signed62 modulus; + + /* modulus^{-1} mod 2^62 */ + uint64_t modulus_inv62; +} rustsecp256k1zkp_v0_4_0_modinv64_modinfo; + +/* Replace x with its modular inverse mod modinfo->modulus. x must be in range [0, modulus). + * If x is zero, the result will be zero as well. If not, the inverse must exist (i.e., the gcd of + * x and modulus must be 1). These rules are automatically satisfied if the modulus is prime. + * + * On output, all of x's limbs will be in [0, 2^62). 
+ */ +static void rustsecp256k1zkp_v0_4_0_modinv64_var(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *x, const rustsecp256k1zkp_v0_4_0_modinv64_modinfo *modinfo); + +/* Same as rustsecp256k1zkp_v0_4_0_modinv64_var, but constant time in x (not in the modulus). */ +static void rustsecp256k1zkp_v0_4_0_modinv64(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *x, const rustsecp256k1zkp_v0_4_0_modinv64_modinfo *modinfo); + +#endif /* SECP256K1_MODINV64_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modinv64_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modinv64_impl.h new file mode 100644 index 00000000..f5ecf5fb --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modinv64_impl.h @@ -0,0 +1,593 @@ +/*********************************************************************** + * Copyright (c) 2020 Peter Dettman * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef SECP256K1_MODINV64_IMPL_H +#define SECP256K1_MODINV64_IMPL_H + +#include "modinv64.h" + +#include "util.h" + +/* This file implements modular inversion based on the paper "Fast constant-time gcd computation and + * modular inversion" by Daniel J. Bernstein and Bo-Yin Yang. + * + * For an explanation of the algorithm, see doc/safegcd_implementation.md. This file contains an + * implementation for N=62, using 62-bit signed limbs represented as int64_t. + */ + +#ifdef VERIFY +/* Helper function to compute the absolute value of an int64_t. + * (we don't use abs/labs/llabs as it depends on the int sizes). */ +static int64_t rustsecp256k1zkp_v0_4_0_modinv64_abs(int64_t v) { + VERIFY_CHECK(v > INT64_MIN); + if (v < 0) return -v; + return v; +} + +static const rustsecp256k1zkp_v0_4_0_modinv64_signed62 SECP256K1_SIGNED62_ONE = {{1}}; + +/* Compute a*factor and put it in r. All but the top limb in r will be in range [0,2^62). 
*/ +static void rustsecp256k1zkp_v0_4_0_modinv64_mul_62(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *r, const rustsecp256k1zkp_v0_4_0_modinv64_signed62 *a, int alen, int64_t factor) { + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + int128_t c = 0; + int i; + for (i = 0; i < 4; ++i) { + if (i < alen) c += (int128_t)a->v[i] * factor; + r->v[i] = (int64_t)c & M62; c >>= 62; + } + if (4 < alen) c += (int128_t)a->v[4] * factor; + VERIFY_CHECK(c == (int64_t)c); + r->v[4] = (int64_t)c; +} + +/* Return -1 for ab*factor. A has alen limbs; b has 5. */ +static int rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(const rustsecp256k1zkp_v0_4_0_modinv64_signed62 *a, int alen, const rustsecp256k1zkp_v0_4_0_modinv64_signed62 *b, int64_t factor) { + int i; + rustsecp256k1zkp_v0_4_0_modinv64_signed62 am, bm; + rustsecp256k1zkp_v0_4_0_modinv64_mul_62(&am, a, alen, 1); /* Normalize all but the top limb of a. */ + rustsecp256k1zkp_v0_4_0_modinv64_mul_62(&bm, b, 5, factor); + for (i = 0; i < 4; ++i) { + /* Verify that all but the top limb of a and b are normalized. */ + VERIFY_CHECK(am.v[i] >> 62 == 0); + VERIFY_CHECK(bm.v[i] >> 62 == 0); + } + for (i = 4; i >= 0; --i) { + if (am.v[i] < bm.v[i]) return -1; + if (am.v[i] > bm.v[i]) return 1; + } + return 0; +} +#endif + +/* Take as input a signed62 number in range (-2*modulus,modulus), and add a multiple of the modulus + * to it to bring it to range [0,modulus). If sign < 0, the input will also be negated in the + * process. The input must have limbs in range (-2^62,2^62). The output will have limbs in range + * [0,2^62). */ +static void rustsecp256k1zkp_v0_4_0_modinv64_normalize_62(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *r, int64_t sign, const rustsecp256k1zkp_v0_4_0_modinv64_modinfo *modinfo) { + const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + int64_t r0 = r->v[0], r1 = r->v[1], r2 = r->v[2], r3 = r->v[3], r4 = r->v[4]; + int64_t cond_add, cond_negate; + +#ifdef VERIFY + /* Verify that all limbs are in range (-2^62,2^62). 
*/ + int i; + for (i = 0; i < 5; ++i) { + VERIFY_CHECK(r->v[i] >= -M62); + VERIFY_CHECK(r->v[i] <= M62); + } + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, -2) > 0); /* r > -2*modulus */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ +#endif + + /* In a first step, add the modulus if the input is negative, and then negate if requested. + * This brings r from range (-2*modulus,modulus) to range (-modulus,modulus). As all input + * limbs are in range (-2^62,2^62), this cannot overflow an int64_t. Note that the right + * shifts below are signed sign-extending shifts (see assumptions.h for tests that that is + * indeed the behavior of the right shift operator). */ + cond_add = r4 >> 63; + r0 += modinfo->modulus.v[0] & cond_add; + r1 += modinfo->modulus.v[1] & cond_add; + r2 += modinfo->modulus.v[2] & cond_add; + r3 += modinfo->modulus.v[3] & cond_add; + r4 += modinfo->modulus.v[4] & cond_add; + cond_negate = sign >> 63; + r0 = (r0 ^ cond_negate) - cond_negate; + r1 = (r1 ^ cond_negate) - cond_negate; + r2 = (r2 ^ cond_negate) - cond_negate; + r3 = (r3 ^ cond_negate) - cond_negate; + r4 = (r4 ^ cond_negate) - cond_negate; + /* Propagate the top bits, to bring limbs back to range (-2^62,2^62). */ + r1 += r0 >> 62; r0 &= M62; + r2 += r1 >> 62; r1 &= M62; + r3 += r2 >> 62; r2 &= M62; + r4 += r3 >> 62; r3 &= M62; + + /* In a second step add the modulus again if the result is still negative, bringing + * r to range [0,modulus). */ + cond_add = r4 >> 63; + r0 += modinfo->modulus.v[0] & cond_add; + r1 += modinfo->modulus.v[1] & cond_add; + r2 += modinfo->modulus.v[2] & cond_add; + r3 += modinfo->modulus.v[3] & cond_add; + r4 += modinfo->modulus.v[4] & cond_add; + /* And propagate again. 
*/ + r1 += r0 >> 62; r0 &= M62; + r2 += r1 >> 62; r1 &= M62; + r3 += r2 >> 62; r2 &= M62; + r4 += r3 >> 62; r3 &= M62; + + r->v[0] = r0; + r->v[1] = r1; + r->v[2] = r2; + r->v[3] = r3; + r->v[4] = r4; + +#ifdef VERIFY + VERIFY_CHECK(r0 >> 62 == 0); + VERIFY_CHECK(r1 >> 62 == 0); + VERIFY_CHECK(r2 >> 62 == 0); + VERIFY_CHECK(r3 >> 62 == 0); + VERIFY_CHECK(r4 >> 62 == 0); + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 0) >= 0); /* r >= 0 */ + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(r, 5, &modinfo->modulus, 1) < 0); /* r < modulus */ +#endif +} + +/* Data type for transition matrices (see section 3 of explanation). + * + * t = [ u v ] + * [ q r ] + */ +typedef struct { + int64_t u, v, q, r; +} rustsecp256k1zkp_v0_4_0_modinv64_trans2x2; + +/* Compute the transition matrix and eta for 59 divsteps (where zeta=-(delta+1/2)). + * Note that the transformation matrix is scaled by 2^62 and not 2^59. + * + * Input: zeta: initial zeta + * f0: bottom limb of initial f + * g0: bottom limb of initial g + * Output: t: transition matrix + * Return: final zeta + * + * Implements the divsteps_n_matrix function from the explanation. + */ +static int64_t rustsecp256k1zkp_v0_4_0_modinv64_divsteps_59(int64_t zeta, uint64_t f0, uint64_t g0, rustsecp256k1zkp_v0_4_0_modinv64_trans2x2 *t) { + /* u,v,q,r are the elements of the transformation matrix being built up, + * starting with the identity matrix times 8 (because the caller expects + * a result scaled by 2^62). Semantically they are signed integers + * in range [-2^62,2^62], but here represented as unsigned mod 2^64. This + * permits left shifting (which is UB for negative numbers). The range + * being inside [-2^63,2^63) means that casting to signed works correctly. 
+ */ + uint64_t u = 8, v = 0, q = 0, r = 8; + uint64_t c1, c2, f = f0, g = g0, x, y, z; + int i; + + for (i = 3; i < 62; ++i) { + VERIFY_CHECK((f & 1) == 1); /* f must always be odd */ + VERIFY_CHECK((u * f0 + v * g0) == f << i); + VERIFY_CHECK((q * f0 + r * g0) == g << i); + /* Compute conditional masks for (zeta < 0) and for (g & 1). */ + c1 = zeta >> 63; + c2 = -(g & 1); + /* Compute x,y,z, conditionally negated versions of f,u,v. */ + x = (f ^ c1) - c1; + y = (u ^ c1) - c1; + z = (v ^ c1) - c1; + /* Conditionally add x,y,z to g,q,r. */ + g += x & c2; + q += y & c2; + r += z & c2; + /* In what follows, c1 is a condition mask for (zeta < 0) and (g & 1). */ + c1 &= c2; + /* Conditionally change zeta into -zeta-2 or zeta-1. */ + zeta = (zeta ^ c1) - 1; + /* Conditionally add g,q,r to f,u,v. */ + f += g & c1; + u += q & c1; + v += r & c1; + /* Shifts */ + g >>= 1; + u <<= 1; + v <<= 1; + /* Bounds on zeta that follow from the bounds on iteration count (max 10*59 divsteps). */ + VERIFY_CHECK(zeta >= -591 && zeta <= 591); + } + /* Return data in t and return value. */ + t->u = (int64_t)u; + t->v = (int64_t)v; + t->q = (int64_t)q; + t->r = (int64_t)r; + /* The determinant of t must be a power of two. This guarantees that multiplication with t + * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which + * will be divided out again). As each divstep's individual matrix has determinant 2, the + * aggregate of 59 of them will have determinant 2^59. Multiplying with the initial + * 8*identity (which has determinant 2^6) means the overall outputs has determinant + * 2^65. */ + VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 65); + return zeta; +} + +/* Compute the transition matrix and eta for 62 divsteps (variable time, eta=-delta). 
+ * + * Input: eta: initial eta + * f0: bottom limb of initial f + * g0: bottom limb of initial g + * Output: t: transition matrix + * Return: final eta + * + * Implements the divsteps_n_matrix_var function from the explanation. + */ +static int64_t rustsecp256k1zkp_v0_4_0_modinv64_divsteps_62_var(int64_t eta, uint64_t f0, uint64_t g0, rustsecp256k1zkp_v0_4_0_modinv64_trans2x2 *t) { + /* Transformation matrix; see comments in rustsecp256k1zkp_v0_4_0_modinv64_divsteps_62. */ + uint64_t u = 1, v = 0, q = 0, r = 1; + uint64_t f = f0, g = g0, m; + uint32_t w; + int i = 62, limit, zeros; + + for (;;) { + /* Use a sentinel bit to count zeros only up to i. */ + zeros = rustsecp256k1zkp_v0_4_0_ctz64_var(g | (UINT64_MAX << i)); + /* Perform zeros divsteps at once; they all just divide g by two. */ + g >>= zeros; + u <<= zeros; + v <<= zeros; + eta -= zeros; + i -= zeros; + /* We're done once we've done 62 divsteps. */ + if (i == 0) break; + VERIFY_CHECK((f & 1) == 1); + VERIFY_CHECK((g & 1) == 1); + VERIFY_CHECK((u * f0 + v * g0) == f << (62 - i)); + VERIFY_CHECK((q * f0 + r * g0) == g << (62 - i)); + /* Bounds on eta that follow from the bounds on iteration count (max 12*62 divsteps). */ + VERIFY_CHECK(eta >= -745 && eta <= 745); + /* If eta is negative, negate it and replace f,g with g,-f. */ + if (eta < 0) { + uint64_t tmp; + eta = -eta; + tmp = f; f = g; g = -tmp; + tmp = u; u = q; q = -tmp; + tmp = v; v = r; r = -tmp; + /* Use a formula to cancel out up to 6 bits of g. Also, no more than i can be cancelled + * out (as we'd be done before that point), and no more than eta+1 can be done as its + * will flip again once that happens. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + VERIFY_CHECK(limit > 0 && limit <= 62); + /* m is a mask for the bottom min(limit, 6) bits. */ + m = (UINT64_MAX >> (64 - limit)) & 63U; + /* Find what multiple of f must be added to g to cancel its bottom min(limit, 6) + * bits. 
*/ + w = (f * g * (f * f - 2)) & m; + } else { + /* In this branch, use a simpler formula that only lets us cancel up to 4 bits of g, as + * eta tends to be smaller here. */ + limit = ((int)eta + 1) > i ? i : ((int)eta + 1); + VERIFY_CHECK(limit > 0 && limit <= 62); + /* m is a mask for the bottom min(limit, 4) bits. */ + m = (UINT64_MAX >> (64 - limit)) & 15U; + /* Find what multiple of f must be added to g to cancel its bottom min(limit, 4) + * bits. */ + w = f + (((f + 1) & 4) << 1); + w = (-w * g) & m; + } + g += f * w; + q += u * w; + r += v * w; + VERIFY_CHECK((g & m) == 0); + } + /* Return data in t and return value. */ + t->u = (int64_t)u; + t->v = (int64_t)v; + t->q = (int64_t)q; + t->r = (int64_t)r; + /* The determinant of t must be a power of two. This guarantees that multiplication with t + * does not change the gcd of f and g, apart from adding a power-of-2 factor to it (which + * will be divided out again). As each divstep's individual matrix has determinant 2, the + * aggregate of 62 of them will have determinant 2^62. */ + VERIFY_CHECK((int128_t)t->u * t->r - (int128_t)t->v * t->q == ((int128_t)1) << 62); + return eta; +} + +/* Compute (t/2^62) * [d, e] mod modulus, where t is a transition matrix scaled by 2^62. + * + * On input and output, d and e are in range (-2*modulus,modulus). All output limbs will be in range + * (-2^62,2^62). + * + * This implements the update_de function from the explanation. 
 */
static void rustsecp256k1zkp_v0_4_0_modinv64_update_de_62(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *d, rustsecp256k1zkp_v0_4_0_modinv64_signed62 *e, const rustsecp256k1zkp_v0_4_0_modinv64_trans2x2 *t, const rustsecp256k1zkp_v0_4_0_modinv64_modinfo* modinfo) {
    /* M62 = 2^62 - 1: mask of the 62 significant bits of one limb. */
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t d0 = d->v[0], d1 = d->v[1], d2 = d->v[2], d3 = d->v[3], d4 = d->v[4];
    const int64_t e0 = e->v[0], e1 = e->v[1], e2 = e->v[2], e3 = e->v[3], e4 = e->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t md, me, sd, se;
    int128_t cd, ce;
#ifdef VERIFY
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d <    modulus */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e <    modulus */
    VERIFY_CHECK((rustsecp256k1zkp_v0_4_0_modinv64_abs(u) + rustsecp256k1zkp_v0_4_0_modinv64_abs(v)) >= 0); /* |u|+|v| doesn't overflow */
    VERIFY_CHECK((rustsecp256k1zkp_v0_4_0_modinv64_abs(q) + rustsecp256k1zkp_v0_4_0_modinv64_abs(r)) >= 0); /* |q|+|r| doesn't overflow */
    VERIFY_CHECK((rustsecp256k1zkp_v0_4_0_modinv64_abs(u) + rustsecp256k1zkp_v0_4_0_modinv64_abs(v)) <= M62 + 1); /* |u|+|v| <= 2^62 */
    VERIFY_CHECK((rustsecp256k1zkp_v0_4_0_modinv64_abs(q) + rustsecp256k1zkp_v0_4_0_modinv64_abs(r)) <= M62 + 1); /* |q|+|r| <= 2^62 */
#endif
    /* [md,me] start as zero; plus [u,q] if d is negative; plus [v,r] if e is negative. */
    sd = d4 >> 63; /* all-ones iff d < 0 (arithmetic shift of the top limb) */
    se = e4 >> 63; /* all-ones iff e < 0 */
    md = (u & sd) + (v & se);
    me = (q & sd) + (r & se);
    /* Begin computing t*[d,e]. */
    cd = (int128_t)u * d0 + (int128_t)v * e0;
    ce = (int128_t)q * d0 + (int128_t)r * e0;
    /* Correct md,me so that t*[d,e]+modulus*[md,me] has 62 zero bottom bits.
     * modulus_inv62 is used as the modular inverse of the modulus mod 2^62 here. */
    md -= (modinfo->modulus_inv62 * (uint64_t)cd + md) & M62;
    me -= (modinfo->modulus_inv62 * (uint64_t)ce + me) & M62;
    /* Update the beginning of computation for t*[d,e]+modulus*[md,me] now md,me are known. */
    cd += (int128_t)modinfo->modulus.v[0] * md;
    ce += (int128_t)modinfo->modulus.v[0] * me;
    /* Verify that the low 62 bits of the computation are indeed zero, and then throw them away. */
    VERIFY_CHECK(((int64_t)cd & M62) == 0); cd >>= 62;
    VERIFY_CHECK(((int64_t)ce & M62) == 0); ce >>= 62;
    /* Compute limb 1 of t*[d,e]+modulus*[md,me], and store it as output limb 0 (= down shift). */
    cd += (int128_t)u * d1 + (int128_t)v * e1;
    ce += (int128_t)q * d1 + (int128_t)r * e1;
    if (modinfo->modulus.v[1]) { /* Optimize for the case where limb of modulus is zero. */
        cd += (int128_t)modinfo->modulus.v[1] * md;
        ce += (int128_t)modinfo->modulus.v[1] * me;
    }
    d->v[0] = (int64_t)cd & M62; cd >>= 62;
    e->v[0] = (int64_t)ce & M62; ce >>= 62;
    /* Compute limb 2 of t*[d,e]+modulus*[md,me], and store it as output limb 1. */
    cd += (int128_t)u * d2 + (int128_t)v * e2;
    ce += (int128_t)q * d2 + (int128_t)r * e2;
    if (modinfo->modulus.v[2]) { /* Optimize for the case where limb of modulus is zero. */
        cd += (int128_t)modinfo->modulus.v[2] * md;
        ce += (int128_t)modinfo->modulus.v[2] * me;
    }
    d->v[1] = (int64_t)cd & M62; cd >>= 62;
    e->v[1] = (int64_t)ce & M62; ce >>= 62;
    /* Compute limb 3 of t*[d,e]+modulus*[md,me], and store it as output limb 2. */
    cd += (int128_t)u * d3 + (int128_t)v * e3;
    ce += (int128_t)q * d3 + (int128_t)r * e3;
    if (modinfo->modulus.v[3]) { /* Optimize for the case where limb of modulus is zero. */
        cd += (int128_t)modinfo->modulus.v[3] * md;
        ce += (int128_t)modinfo->modulus.v[3] * me;
    }
    d->v[2] = (int64_t)cd & M62; cd >>= 62;
    e->v[2] = (int64_t)ce & M62; ce >>= 62;
    /* Compute limb 4 of t*[d,e]+modulus*[md,me], and store it as output limb 3. */
    cd += (int128_t)u * d4 + (int128_t)v * e4;
    ce += (int128_t)q * d4 + (int128_t)r * e4;
    cd += (int128_t)modinfo->modulus.v[4] * md;
    ce += (int128_t)modinfo->modulus.v[4] * me;
    d->v[3] = (int64_t)cd & M62; cd >>= 62;
    e->v[3] = (int64_t)ce & M62; ce >>= 62;
    /* What remains is limb 5 of t*[d,e]+modulus*[md,me]; store it as output limb 4. */
    d->v[4] = (int64_t)cd;
    e->v[4] = (int64_t)ce;
#ifdef VERIFY
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, -2) > 0); /* d > -2*modulus */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(d, 5, &modinfo->modulus, 1) < 0);  /* d <    modulus */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, -2) > 0); /* e > -2*modulus */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(e, 5, &modinfo->modulus, 1) < 0);  /* e <    modulus */
#endif
}

/* Compute (t/2^62) * [f, g], where t is a transition matrix scaled by 2^62.
 *
 * This implements the update_fg function from the explanation.
 */
static void rustsecp256k1zkp_v0_4_0_modinv64_update_fg_62(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *f, rustsecp256k1zkp_v0_4_0_modinv64_signed62 *g, const rustsecp256k1zkp_v0_4_0_modinv64_trans2x2 *t) {
    /* M62 = 2^62 - 1: mask of the 62 significant bits of one limb. */
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t f0 = f->v[0], f1 = f->v[1], f2 = f->v[2], f3 = f->v[3], f4 = f->v[4];
    const int64_t g0 = g->v[0], g1 = g->v[1], g2 = g->v[2], g3 = g->v[3], g4 = g->v[4];
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int128_t cf, cg;
    /* Start computing t*[f,g]. */
    cf = (int128_t)u * f0 + (int128_t)v * g0;
    cg = (int128_t)q * f0 + (int128_t)r * g0;
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62;
    VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62;
    /* Compute limb 1 of t*[f,g], and store it as output limb 0 (= down shift). */
    cf += (int128_t)u * f1 + (int128_t)v * g1;
    cg += (int128_t)q * f1 + (int128_t)r * g1;
    f->v[0] = (int64_t)cf & M62; cf >>= 62;
    g->v[0] = (int64_t)cg & M62; cg >>= 62;
    /* Compute limb 2 of t*[f,g], and store it as output limb 1. */
    cf += (int128_t)u * f2 + (int128_t)v * g2;
    cg += (int128_t)q * f2 + (int128_t)r * g2;
    f->v[1] = (int64_t)cf & M62; cf >>= 62;
    g->v[1] = (int64_t)cg & M62; cg >>= 62;
    /* Compute limb 3 of t*[f,g], and store it as output limb 2. */
    cf += (int128_t)u * f3 + (int128_t)v * g3;
    cg += (int128_t)q * f3 + (int128_t)r * g3;
    f->v[2] = (int64_t)cf & M62; cf >>= 62;
    g->v[2] = (int64_t)cg & M62; cg >>= 62;
    /* Compute limb 4 of t*[f,g], and store it as output limb 3. */
    cf += (int128_t)u * f4 + (int128_t)v * g4;
    cg += (int128_t)q * f4 + (int128_t)r * g4;
    f->v[3] = (int64_t)cf & M62; cf >>= 62;
    g->v[3] = (int64_t)cg & M62; cg >>= 62;
    /* What remains is limb 5 of t*[f,g]; store it as output limb 4. */
    f->v[4] = (int64_t)cf;
    g->v[4] = (int64_t)cg;
}

/* Compute (t/2^62) * [f, g], where t is a transition matrix for 62 divsteps.
 *
 * Version that operates on a variable number of limbs in f and g.
 *
 * This implements the update_fg function from the explanation.
 */
static void rustsecp256k1zkp_v0_4_0_modinv64_update_fg_62_var(int len, rustsecp256k1zkp_v0_4_0_modinv64_signed62 *f, rustsecp256k1zkp_v0_4_0_modinv64_signed62 *g, const rustsecp256k1zkp_v0_4_0_modinv64_trans2x2 *t) {
    /* M62 = 2^62 - 1: mask of the 62 significant bits of one limb. */
    const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
    const int64_t u = t->u, v = t->v, q = t->q, r = t->r;
    int64_t fi, gi;
    int128_t cf, cg;
    int i;
    VERIFY_CHECK(len > 0);
    /* Start computing t*[f,g]. */
    fi = f->v[0];
    gi = g->v[0];
    cf = (int128_t)u * fi + (int128_t)v * gi;
    cg = (int128_t)q * fi + (int128_t)r * gi;
    /* Verify that the bottom 62 bits of the result are zero, and then throw them away. */
    VERIFY_CHECK(((int64_t)cf & M62) == 0); cf >>= 62;
    VERIFY_CHECK(((int64_t)cg & M62) == 0); cg >>= 62;
    /* Now iteratively compute limb i=1..len of t*[f,g], and store them in output limb i-1 (shifting
     * down by 62 bits). */
    for (i = 1; i < len; ++i) {
        fi = f->v[i];
        gi = g->v[i];
        cf += (int128_t)u * fi + (int128_t)v * gi;
        cg += (int128_t)q * fi + (int128_t)r * gi;
        f->v[i - 1] = (int64_t)cf & M62; cf >>= 62;
        g->v[i - 1] = (int64_t)cg & M62; cg >>= 62;
    }
    /* What remains is limb (len) of t*[f,g]; store it as output limb (len-1). */
    f->v[len - 1] = (int64_t)cf;
    g->v[len - 1] = (int64_t)cg;
}

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (constant time in x). */
static void rustsecp256k1zkp_v0_4_0_modinv64(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *x, const rustsecp256k1zkp_v0_4_0_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, zeta=-1. */
    rustsecp256k1zkp_v0_4_0_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    rustsecp256k1zkp_v0_4_0_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    rustsecp256k1zkp_v0_4_0_modinv64_signed62 f = modinfo->modulus;
    rustsecp256k1zkp_v0_4_0_modinv64_signed62 g = *x;
    int i;
    int64_t zeta = -1; /* zeta = -(delta+1/2); delta starts at 1/2. */

    /* Do 10 iterations of 59 divsteps each = 590 divsteps. This suffices for 256-bit inputs. */
    for (i = 0; i < 10; ++i) {
        /* Compute transition matrix and new zeta after 59 divsteps. */
        rustsecp256k1zkp_v0_4_0_modinv64_trans2x2 t;
        zeta = rustsecp256k1zkp_v0_4_0_modinv64_divsteps_59(zeta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        rustsecp256k1zkp_v0_4_0_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
#ifdef VERIFY
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g <  modulus */
#endif
        rustsecp256k1zkp_v0_4_0_modinv64_update_fg_62(&f, &g, &t);
#ifdef VERIFY
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, 5, &modinfo->modulus, 1) < 0);  /* g <  modulus */
#endif
    }

    /* At this point sufficient iterations have been performed that g must have reached 0
     * and (if g was not originally 0) f must now equal +/- GCD of the initial f, g
     * values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
    /* g == 0 */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, 5, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, 5, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, 1) == 0 ||
                   rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, 5, &modinfo->modulus, -1) == 0)));
#endif

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    rustsecp256k1zkp_v0_4_0_modinv64_normalize_62(&d, f.v[4], modinfo);
    *x = d;
}

/* Compute the inverse of x modulo modinfo->modulus, and replace x with it (variable time). */
static void rustsecp256k1zkp_v0_4_0_modinv64_var(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *x, const rustsecp256k1zkp_v0_4_0_modinv64_modinfo *modinfo) {
    /* Start with d=0, e=1, f=modulus, g=x, eta=-1. */
    rustsecp256k1zkp_v0_4_0_modinv64_signed62 d = {{0, 0, 0, 0, 0}};
    rustsecp256k1zkp_v0_4_0_modinv64_signed62 e = {{1, 0, 0, 0, 0}};
    rustsecp256k1zkp_v0_4_0_modinv64_signed62 f = modinfo->modulus;
    rustsecp256k1zkp_v0_4_0_modinv64_signed62 g = *x;
#ifdef VERIFY
    int i = 0;
#endif
    int j, len = 5;
    int64_t eta = -1; /* eta = -delta; delta is initially 1 */
    int64_t cond, fn, gn;

    /* Do iterations of 62 divsteps each until g=0. */
    while (1) {
        /* Compute transition matrix and new eta after 62 divsteps. */
        rustsecp256k1zkp_v0_4_0_modinv64_trans2x2 t;
        eta = rustsecp256k1zkp_v0_4_0_modinv64_divsteps_62_var(eta, f.v[0], g.v[0], &t);
        /* Update d,e using that transition matrix. */
        rustsecp256k1zkp_v0_4_0_modinv64_update_de_62(&d, &e, &t, modinfo);
        /* Update f,g using that transition matrix. */
#ifdef VERIFY
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g <  modulus */
#endif
        rustsecp256k1zkp_v0_4_0_modinv64_update_fg_62_var(len, &f, &g, &t);
        /* If the bottom limb of g is zero, there is a chance that g=0. */
        if (g.v[0] == 0) {
            cond = 0;
            /* Check if the other limbs are also 0. */
            for (j = 1; j < len; ++j) {
                cond |= g.v[j];
            }
            /* If so, we're done. */
            if (cond == 0) break;
        }

        /* Determine if len>1 and limb (len-1) of both f and g is 0 or -1. */
        fn = f.v[len - 1];
        gn = g.v[len - 1];
        cond = ((int64_t)len - 2) >> 63;      /* negative (nonzero) iff len < 2 */
        cond |= fn ^ (fn >> 63);              /* nonzero iff fn is not 0 or -1 */
        cond |= gn ^ (gn >> 63);              /* nonzero iff gn is not 0 or -1 */
        /* If so, reduce length, propagating the sign of f and g's top limb into the one below. */
        if (cond == 0) {
            f.v[len - 2] |= (uint64_t)fn << 62;
            g.v[len - 2] |= (uint64_t)gn << 62;
            --len;
        }
#ifdef VERIFY
        VERIFY_CHECK(++i < 12); /* We should never need more than 12*62 = 744 divsteps */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) > 0); /* f > -modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) <= 0); /* f <= modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, -1) > 0); /* g > -modulus */
        VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, len, &modinfo->modulus, 1) < 0);  /* g <  modulus */
#endif
    }

    /* At this point g is 0 and (if g was not originally 0) f must now equal +/- GCD of
     * the initial f, g values i.e. +/- 1, and d now contains +/- the modular inverse. */
#ifdef VERIFY
    /* g == 0 */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&g, len, &SECP256K1_SIGNED62_ONE, 0) == 0);
    /* |f| == 1, or (x == 0 and d == 0 and |f|=modulus) */
    VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, -1) == 0 ||
                 rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, len, &SECP256K1_SIGNED62_ONE, 1) == 0 ||
                 (rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(x, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&d, 5, &SECP256K1_SIGNED62_ONE, 0) == 0 &&
                  (rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, 1) == 0 ||
                   rustsecp256k1zkp_v0_4_0_modinv64_mul_cmp_62(&f, len, &modinfo->modulus, -1) == 0)));
#endif

    /* Optionally negate d, normalize to [0,modulus), and return it. */
    rustsecp256k1zkp_v0_4_0_modinv64_normalize_62(&d, f.v[len - 1], modinfo);
    *x = d;
}

#endif /* SECP256K1_MODINV64_IMPL_H */
diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdh/main_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdh/main_impl.h index 6984b898..1d593291 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdh/main_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdh/main_impl.h @@ -7,8 +7,8 @@ #ifndef SECP256K1_MODULE_ECDH_MAIN_H #define SECP256K1_MODULE_ECDH_MAIN_H -#include "include/secp256k1_ecdh.h" -#include "ecmult_const_impl.h" +#include "../../../include/secp256k1_ecdh.h" +#include "../../ecmult_const_impl.h" static int ecdh_hash_function_sha256(unsigned char *output, const unsigned char *x32, const unsigned char *y32, void *data) { unsigned char version = (y32[31] & 0x01) | 0x02; diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdsa_adaptor/main_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdsa_adaptor/main_impl.h index 7efbb038..2c243740 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdsa_adaptor/main_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdsa_adaptor/main_impl.h @@ -341,6 +341,17 @@ int rustsecp256k1zkp_v0_4_0_ecdsa_adaptor_recover(const rustsecp256k1zkp_v0_4_0_ * branch point. */ rustsecp256k1zkp_v0_4_0_declassify(ctx, &enckey_expected_ge, sizeof(enckey_expected_ge)); if (!rustsecp256k1zkp_v0_4_0_eckey_pubkey_serialize(&enckey_expected_ge, enckey_expected33, &size, SECP256K1_EC_COMPRESSED)) { + /* Unreachable from tests (and other VERIFY builds) and therefore this + * branch should be ignored in test coverage analysis. + * + * Proof: + * eckey_pubkey_serialize fails <=> deckey = 0 + * deckey = 0 <=> s^-1 = 0 or sp = 0 + * case 1: s^-1 = 0 impossible by the definition of multiplicative + * inverse and because the scalar_inverse implementation + * VERIFY_CHECKs that the inputs are valid scalars.
+ * case 2: sp = 0 impossible because ecdsa_adaptor_sig_deserialize would have already failed + */ return 0; } if (!rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize(ctx, enckey33, &size, enckey, SECP256K1_EC_COMPRESSED)) { diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdsa_adaptor/tests_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdsa_adaptor/tests_impl.h index 81c1d9b6..3cf19c11 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdsa_adaptor/tests_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/ecdsa_adaptor/tests_impl.h @@ -1032,7 +1032,15 @@ void adaptor_tests(void) { CHECK(rustsecp256k1zkp_v0_4_0_ecdsa_adaptor_verify(ctx, adaptor_sig, &enckey, msg, &enckey) == 0); CHECK(rustsecp256k1zkp_v0_4_0_ecdsa_adaptor_verify(ctx, adaptor_sig, &pubkey, msg, &pubkey) == 0); { - unsigned char adaptor_sig_tmp[65]; + /* Test failed adaptor sig deserialization */ + unsigned char adaptor_sig_tmp[162]; + memset(&adaptor_sig_tmp, 0xFF, 162); + CHECK(rustsecp256k1zkp_v0_4_0_ecdsa_adaptor_verify(ctx, adaptor_sig_tmp, &pubkey, msg, &enckey) == 0); + } + { + /* Test that any flipped bit in the adaptor signature will make + * verification fail */ + unsigned char adaptor_sig_tmp[162]; memcpy(adaptor_sig_tmp, adaptor_sig, sizeof(adaptor_sig_tmp)); rand_flip_bit(&adaptor_sig_tmp[1], sizeof(adaptor_sig_tmp) - 1); CHECK(rustsecp256k1zkp_v0_4_0_ecdsa_adaptor_verify(ctx, adaptor_sig_tmp, &pubkey, msg, &enckey) == 0); @@ -1102,15 +1110,8 @@ void adaptor_tests(void) { } { /* Test key recover */ - rustsecp256k1zkp_v0_4_0_ecdsa_signature sig_tmp; unsigned char decryption_key_tmp[32]; unsigned char adaptor_sig_tmp[162]; - const unsigned char order_le[32] = { - 0x41, 0x41, 0x36, 0xd0, 0x8c, 0x5e, 0xd2, 0xbf, - 0x3b, 0xa0, 0x48, 0xaf, 0xe6, 0xdc, 0xae, 0xba, - 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff - }; CHECK(rustsecp256k1zkp_v0_4_0_ecdsa_adaptor_recover(ctx, decryption_key_tmp, &sig, adaptor_sig, 
&enckey) == 1); CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(deckey, decryption_key_tmp, sizeof(deckey)) == 0); @@ -1119,11 +1120,6 @@ void adaptor_tests(void) { memcpy(adaptor_sig_tmp, adaptor_sig, sizeof(adaptor_sig_tmp)); memset(&adaptor_sig_tmp[66], 0xFF, 32); CHECK(rustsecp256k1zkp_v0_4_0_ecdsa_adaptor_recover(ctx, decryption_key_tmp, &sig, adaptor_sig_tmp, &enckey) == 0); - - /* Test failed enckey_expected serialization */ - memcpy(sig_tmp.data, sig.data, 32); - memcpy(&sig_tmp.data[32], order_le, 32); - CHECK(rustsecp256k1zkp_v0_4_0_ecdsa_adaptor_recover(ctx, decryption_key_tmp, &sig_tmp, adaptor_sig, &enckey) == 0); } } diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include index 2ca8cf0b..e6038bc4 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/Makefile.am.include @@ -2,3 +2,5 @@ include_HEADERS += include/rustsecp256k1zkp_v0_4_0_extrakeys.h noinst_HEADERS += src/modules/extrakeys/tests_impl.h noinst_HEADERS += src/modules/extrakeys/tests_exhaustive_impl.h noinst_HEADERS += src/modules/extrakeys/main_impl.h +noinst_HEADERS += src/modules/extrakeys/hsort.h +noinst_HEADERS += src/modules/extrakeys/hsort_impl.h diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/hsort.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/hsort.h new file mode 100644 index 00000000..54e31550 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/hsort.h @@ -0,0 +1,22 @@ +/*********************************************************************** + * Copyright (c) 2021 Russell O'Connor, Jonas Nick * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + ***********************************************************************/ + 
+#ifndef SECP256K1_HSORT_H_ +#define SECP256K1_HSORT_H_ + +#include <stddef.h> +#include <string.h> + +/* In-place, iterative heapsort with an interface matching glibc's qsort_r. This + * is preferred over standard library implementations because they generally + * make no guarantee about being fast for malicious inputs. + * + * See the qsort_r manpage for a description of the interface. + */ +static void rustsecp256k1zkp_v0_4_0_hsort(void *ptr, size_t count, size_t size, + int (*cmp)(const void *, const void *, void *), + void *cmp_data); +#endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/hsort_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/hsort_impl.h new file mode 100644 index 00000000..7a2cf7ef --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/hsort_impl.h @@ -0,0 +1,116 @@ +/*********************************************************************** + * Copyright (c) 2021 Russell O'Connor, Jonas Nick * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + ***********************************************************************/ + +#ifndef SECP256K1_HSORT_IMPL_H_ +#define SECP256K1_HSORT_IMPL_H_ + +#include "hsort.h" + +/* An array is a heap when, for all non-zero indexes i, the element at index i + * compares as less than or equal to the element at index parent(i) = (i-1)/2. 
+ */ + +static SECP256K1_INLINE size_t child1(size_t i) { + VERIFY_CHECK(i <= (SIZE_MAX - 1)/2); + return 2*i + 1; +} + +static SECP256K1_INLINE size_t child2(size_t i) { + VERIFY_CHECK(i <= SIZE_MAX/2 - 1); + return child1(i)+1; +} + +static SECP256K1_INLINE void swap64(unsigned char *a, size_t i, size_t j, size_t stride) { + unsigned char tmp[64]; + VERIFY_CHECK(stride <= 64); + memcpy(tmp, a + i*stride, stride); + memmove(a + i*stride, a + j*stride, stride); + memcpy(a + j*stride, tmp, stride); +} + +static SECP256K1_INLINE void swap(unsigned char *a, size_t i, size_t j, size_t stride) { + while (64 < stride) { + swap64(a + (stride - 64), i, j, 64); + stride -= 64; + } + swap64(a, i, j, stride); +} + +static SECP256K1_INLINE void heap_down(unsigned char *a, size_t i, size_t heap_size, size_t stride, + int (*cmp)(const void *, const void *, void *), void *cmp_data) { + while (i < heap_size/2) { + VERIFY_CHECK(i <= SIZE_MAX/2 - 1); + /* Proof: + * i < heap_size/2 + * i + 1 <= heap_size/2 + * 2*i + 2 <= heap_size <= SIZE_MAX + * 2*i <= SIZE_MAX - 2 + */ + + VERIFY_CHECK(child1(i) < heap_size); + /* Proof: + * i < heap_size/2 + * i + 1 <= heap_size/2 + * 2*i + 2 <= heap_size + * 2*i + 1 < heap_size + * child1(i) < heap_size + */ + + /* Let [x] be notation for the contents at a[x*stride]. + * + * If [child1(i)] > [i] and [child2(i)] > [i], + * swap [i] with the larger child to ensure the new parent is larger + * than both children. When [child1(i)] == [child2(i)], swap [i] with + * [child2(i)]. + * Else if [child1(i)] > [i], swap [i] with [child1(i)]. + * Else if [child2(i)] > [i], swap [i] with [child2(i)]. 
+ */ + if (child2(i) < heap_size + && 0 <= cmp(a + child2(i)*stride, a + child1(i)*stride, cmp_data)) { + if (0 < cmp(a + child2(i)*stride, a + i*stride, cmp_data)) { + swap(a, i, child2(i), stride); + i = child2(i); + } else { + /* At this point we have [child2(i)] >= [child1(i)] and we have + * [child2(i)] <= [i], and thus [child1(i)] <= [i] which means + * that the next comparison can be skipped. */ + return; + } + } else if (0 < cmp(a + child1(i)*stride, a + i*stride, cmp_data)) { + swap(a, i, child1(i), stride); + i = child1(i); + } else { + return; + } + } + /* heap_size/2 <= i + * heap_size/2 < i + 1 + * heap_size < 2*i + 2 + * heap_size <= 2*i + 1 + * heap_size <= child1(i) + * Thus child1(i) and child2(i) are now out of bounds and we are at a leaf. + */ +} + +/* In-place heap sort. */ +static void rustsecp256k1zkp_v0_4_0_hsort(void *ptr, size_t count, size_t size, + int (*cmp)(const void *, const void *, void *), + void *cmp_data ) { + size_t i; + + for(i = count/2; 0 < i; --i) { + heap_down(ptr, i-1, count, size, cmp, cmp_data); + } + for(i = count; 1 < i; --i) { + /* Extract the largest value from the heap */ + swap(ptr, 0, i-1, size); + + /* Repair the heap condition */ + heap_down(ptr, 0, i-1, size, cmp, cmp_data); + } +} + +#endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h index 29d76008..41a1c699 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/main_impl.h @@ -7,8 +7,9 @@ #ifndef SECP256K1_MODULE_EXTRAKEYS_MAIN_H #define SECP256K1_MODULE_EXTRAKEYS_MAIN_H -#include "include/secp256k1.h" -#include "include/secp256k1_extrakeys.h" +#include "../../../include/secp256k1.h" +#include "../../../include/secp256k1_extrakeys.h" +#include "hsort_impl.h" static SECP256K1_INLINE int rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(const rustsecp256k1zkp_v0_4_0_context* 
ctx, rustsecp256k1zkp_v0_4_0_ge *ge, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkey) { return rustsecp256k1zkp_v0_4_0_pubkey_load(ctx, ge, (const rustsecp256k1zkp_v0_4_0_pubkey *) pubkey); @@ -55,6 +56,32 @@ int rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(const rustsecp256k1zkp_v0_4_0 return 1; }

/* Compare two x-only public keys by the lexicographic order of their 32-byte
 * serializations. Returns <0, 0 or >0 (memcmp convention). */
int rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(const rustsecp256k1zkp_v0_4_0_context* ctx, const rustsecp256k1zkp_v0_4_0_xonly_pubkey* pk0, const rustsecp256k1zkp_v0_4_0_xonly_pubkey* pk1) {
    unsigned char out[2][32];
    const rustsecp256k1zkp_v0_4_0_xonly_pubkey* pk[2];
    int i;

    VERIFY_CHECK(ctx != NULL);
    pk[0] = pk0; pk[1] = pk1;
    for (i = 0; i < 2; i++) {
        /* If the public key is NULL or invalid, xonly_pubkey_serialize will
         * call the illegal_callback and return 0. In that case we will
         * serialize the key as all zeros which is less than any valid public
         * key. This results in consistent comparisons even if NULL or invalid
         * pubkeys are involved and prevents edge cases such as sorting
         * algorithms that use this function and do not terminate as a
         * result. */
        if (!rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, out[i], pk[i])) {
            /* Note that xonly_pubkey_serialize should already set the output to
             * zero in that case, but it's not guaranteed by the API, we can't
             * test it and writing a VERIFY_CHECK is more complex than
             * explicitly memsetting (again). */
            memset(out[i], 0, sizeof(out[i]));
        }
    }
    return rustsecp256k1zkp_v0_4_0_memcmp_var(out[0], out[1], sizeof(out[1]));
}

/** Keeps a group element as is if it has an even Y and otherwise negates it.
 * y_parity is set to 0 in the former case and to 1 in the latter case.
 * Requires that the coordinates of r are normalized.
 */ @@ -128,6 +155,28 @@ int rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_add_check(const rustsecp256k1zkp_ && rustsecp256k1zkp_v0_4_0_fe_is_odd(&pk.y) == tweaked_pk_parity; }

/* This struct wraps a const context pointer to satisfy the rustsecp256k1zkp_v0_4_0_hsort api
 * which expects a non-const cmp_data pointer. */
typedef struct {
    const rustsecp256k1zkp_v0_4_0_context *ctx;
} rustsecp256k1zkp_v0_4_0_xonly_sort_cmp_data;

/* hsort comparator: the array elements are pointers to x-only pubkeys, so
 * dereference once before delegating to xonly_pubkey_cmp. */
static int rustsecp256k1zkp_v0_4_0_xonly_sort_cmp(const void* pk1, const void* pk2, void *cmp_data) {
    return rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(((rustsecp256k1zkp_v0_4_0_xonly_sort_cmp_data*)cmp_data)->ctx,
                                                    *(rustsecp256k1zkp_v0_4_0_xonly_pubkey **)pk1,
                                                    *(rustsecp256k1zkp_v0_4_0_xonly_pubkey **)pk2);
}

/* Sort the array of pubkey pointers in place by the keys' serialized
 * (lexicographic) order; invalid/NULL keys compare as all-zero and sort
 * first. Returns 1 on success, 0 only if pubkeys is NULL (via ARG_CHECK). */
int rustsecp256k1zkp_v0_4_0_xonly_sort(const rustsecp256k1zkp_v0_4_0_context* ctx, const rustsecp256k1zkp_v0_4_0_xonly_pubkey **pubkeys, size_t n_pubkeys) {
    rustsecp256k1zkp_v0_4_0_xonly_sort_cmp_data cmp_data;
    VERIFY_CHECK(ctx != NULL);
    ARG_CHECK(pubkeys != NULL);

    cmp_data.ctx = ctx;
    rustsecp256k1zkp_v0_4_0_hsort(pubkeys, n_pubkeys, sizeof(*pubkeys), rustsecp256k1zkp_v0_4_0_xonly_sort_cmp, &cmp_data);
    return 1;
}

static void rustsecp256k1zkp_v0_4_0_keypair_save(rustsecp256k1zkp_v0_4_0_keypair *keypair, const rustsecp256k1zkp_v0_4_0_scalar *sk, rustsecp256k1zkp_v0_4_0_ge *pk) { rustsecp256k1zkp_v0_4_0_scalar_get_b32(&keypair->data[0], sk); rustsecp256k1zkp_v0_4_0_pubkey_save((rustsecp256k1zkp_v0_4_0_pubkey *)&keypair->data[32], pk); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h index 7be7b3e9..cb1ffe3f 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/tests_exhaustive_impl.h @@ -8,7 +8,7 @@ #define SECP256K1_MODULE_EXTRAKEYS_TESTS_EXHAUSTIVE_H #include
"src/modules/extrakeys/main_impl.h" -#include "include/secp256k1_extrakeys.h" +#include "../../../include/secp256k1_extrakeys.h" static void test_exhaustive_extrakeys(const rustsecp256k1zkp_v0_4_0_context *ctx, const rustsecp256k1zkp_v0_4_0_ge* group) { rustsecp256k1zkp_v0_4_0_keypair keypair[EXHAUSTIVE_TEST_ORDER - 1]; diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h index bd39c081..c2b44a7c 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/extrakeys/tests_impl.h @@ -7,7 +7,7 @@ #ifndef SECP256K1_MODULE_EXTRAKEYS_TESTS_H #define SECP256K1_MODULE_EXTRAKEYS_TESTS_H -#include "secp256k1_extrakeys.h" +#include "../../../include/secp256k1_extrakeys.h" static rustsecp256k1zkp_v0_4_0_context* api_test_context(int flags, int *ecount) { rustsecp256k1zkp_v0_4_0_context *ctx0 = rustsecp256k1zkp_v0_4_0_context_create(flags); @@ -137,6 +137,43 @@ void test_xonly_pubkey(void) { rustsecp256k1zkp_v0_4_0_context_destroy(verify); }

/* Exercise xonly_pubkey_cmp: ordering of two fixed keys, self-comparison,
 * and the behavior for NULL/invalid keys (which serialize as all zeros and
 * therefore sort before any valid key while bumping the error count). */
void test_xonly_pubkey_comparison(void) {
    unsigned char pk1_ser[32] = {
        0x58, 0x84, 0xb3, 0xa2, 0x4b, 0x97, 0x37, 0x88, 0x92, 0x38, 0xa6, 0x26, 0x62, 0x52, 0x35, 0x11,
        0xd0, 0x9a, 0xa1, 0x1b, 0x80, 0x0b, 0x5e, 0x93, 0x80, 0x26, 0x11, 0xef, 0x67, 0x4b, 0xd9, 0x23
    };
    const unsigned char pk2_ser[32] = {
        0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d,
        0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c
    };
    rustsecp256k1zkp_v0_4_0_xonly_pubkey pk1;
    rustsecp256k1zkp_v0_4_0_xonly_pubkey pk2;
    int ecount = 0;
    rustsecp256k1zkp_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount);

    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(none, &pk1, pk1_ser) == 1);
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(none, &pk2, pk2_ser) == 1);

    /* NULL arguments trigger the illegal callback once per bad argument. */
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(none, NULL, &pk2) < 0);
    CHECK(ecount == 1);
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(none, &pk1, NULL) > 0);
    CHECK(ecount == 2);
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(none, &pk1, &pk2) < 0);
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(none, &pk2, &pk1) > 0);
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(none, &pk1, &pk1) == 0);
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(none, &pk2, &pk2) == 0);
    CHECK(ecount == 2);
    memset(&pk1, 0, sizeof(pk1)); /* illegal pubkey */
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(none, &pk1, &pk2) < 0);
    CHECK(ecount == 3);
    /* Comparing the illegal key with itself fails to serialize it twice,
     * hence the error count increases by two. */
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(none, &pk1, &pk1) == 0);
    CHECK(ecount == 5);
    CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_cmp(none, &pk2, &pk1) > 0);
    CHECK(ecount == 6);

    rustsecp256k1zkp_v0_4_0_context_destroy(none);
}

void test_xonly_pubkey_tweak(void) { unsigned char zeros64[64] = { 0 }; unsigned char overflows[32]; @@ -534,16 +571,167 @@ void test_keypair_add(void) { rustsecp256k1zkp_v0_4_0_context_destroy(verify); }

/* Check that ints[0..n) is sorted in non-decreasing order. */
static void test_hsort_is_sorted(int *ints, size_t n) {
    size_t i;
    for (i = 1; i < n; i++) {
        CHECK(ints[i-1] <= ints[i]);
    }
}

/* qsort_r-style comparator for ints that also counts its invocations in
 * *counter, so callers can verify whether the sort compared anything. */
static int test_hsort_cmp(const void *i1, const void *i2, void *counter) {
    *(size_t*)counter += 1;
    return *(int*)i1 - *(int*)i2;
}

#define NUM 64
void test_hsort(void) {
    int ints[NUM] = { 0 };
    size_t counter = 0;
    int i, j;

    /* Degenerate sizes 0 and 1 must not invoke the comparator at all. */
    rustsecp256k1zkp_v0_4_0_hsort(ints, 0, sizeof(ints[0]), test_hsort_cmp, &counter);
    CHECK(counter == 0);
    rustsecp256k1zkp_v0_4_0_hsort(ints, 1, sizeof(ints[0]), test_hsort_cmp, &counter);
    CHECK(counter == 0);
    rustsecp256k1zkp_v0_4_0_hsort(ints, NUM, sizeof(ints[0]), test_hsort_cmp, &counter);
    CHECK(counter > 0);
    test_hsort_is_sorted(ints, NUM);

    /* Test hsort with length n array and random elements in
     * [-interval/2, interval/2] */
    for (i = 0; i < count; i++) {
        int n = rustsecp256k1zkp_v0_4_0_testrand_int(NUM);
        /* NOTE(review): interval can be 0 here while n > 0, which passes 0 to
         * testrand_int below — confirm testrand_int(0) is well-defined. */
        int interval = rustsecp256k1zkp_v0_4_0_testrand_int(64);
        for (j = 0; j < n; j++) {
            ints[j] = rustsecp256k1zkp_v0_4_0_testrand_int(interval) - interval/2;
        }
        rustsecp256k1zkp_v0_4_0_hsort(ints, n, sizeof(ints[0]), test_hsort_cmp, &counter);
        test_hsort_is_sorted(ints, n);
    }
}
#undef NUM

/* Sort pointers to pk[pk_order[0..n_pk)] and check the result is the
 * identity order pk[0..n_pk). Supports at most 5 keys (size of pk_test). */
void test_xonly_sort_helper(rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk, size_t *pk_order, size_t n_pk) {
    size_t i;
    const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk_test[5];

    for (i = 0; i < n_pk; i++) {
        pk_test[i] = &pk[pk_order[i]];
    }
    rustsecp256k1zkp_v0_4_0_xonly_sort(ctx, pk_test, n_pk);
    for (i = 0; i < n_pk; i++) {
        CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(pk_test[i], &pk[i], sizeof(*pk_test[i])) == 0);
    }
}

/* Fisher-Yates shuffle of arr[0..n).
 * NOTE(review): n - 1 underflows (size_t) for n == 0 — callers must pass
 * n >= 1. */
void permute(size_t *arr, size_t n) {
    size_t i;
    for (i = n - 1; i >= 1; i--) {
        size_t tmp, j;
        j = rustsecp256k1zkp_v0_4_0_testrand_int(i + 1);
        tmp = arr[i];
        arr[i] = arr[j];
        arr[j] = tmp;
    }
}

/* Generate a uniformly random valid x-only pubkey via a fresh keypair. */
void rand_xonly_pk(rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk) {
    unsigned char seckey[32];
    rustsecp256k1zkp_v0_4_0_keypair keypair;
    rustsecp256k1zkp_v0_4_0_testrand256(seckey);
    CHECK(rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &keypair, seckey) == 1);
    CHECK(rustsecp256k1zkp_v0_4_0_keypair_xonly_pub(ctx, pk, NULL, &keypair) == 1);
}

void test_xonly_sort_api(void) { int ecount = 0; rustsecp256k1zkp_v0_4_0_xonly_pubkey pks[2]; const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pks_ptr[2]; rustsecp256k1zkp_v0_4_0_context *none = api_test_context(SECP256K1_CONTEXT_NONE, &ecount); + pks_ptr[0] = &pks[0]; + pks_ptr[1] = &pks[1]; + + rand_xonly_pk(&pks[0]); + rand_xonly_pk(&pks[1]); + + CHECK(rustsecp256k1zkp_v0_4_0_xonly_sort(none, pks_ptr, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_xonly_sort(none, NULL, 2) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_xonly_sort(none, pks_ptr, 0) == 1); + /* Test illegal public keys */ + memset(&pks[0], 0, sizeof(pks[0])); + 
CHECK(rustsecp256k1zkp_v0_4_0_xonly_sort(none, pks_ptr, 2) == 1); + CHECK(ecount == 2); + memset(&pks[1], 0, sizeof(pks[1])); + CHECK(rustsecp256k1zkp_v0_4_0_xonly_sort(none, pks_ptr, 2) == 1); + CHECK(ecount > 2); + + rustsecp256k1zkp_v0_4_0_context_destroy(none); +} + +void test_xonly_sort(void) { + rustsecp256k1zkp_v0_4_0_xonly_pubkey pk[5]; + unsigned char pk_ser[5][32]; + int i; + size_t pk_order[5] = { 0, 1, 2, 3, 4 }; + + for (i = 0; i < 5; i++) { + memset(pk_ser[i], 0, sizeof(pk_ser[i])); + } + pk_ser[0][0] = 5; + pk_ser[1][0] = 8; + pk_ser[2][0] = 0x0a; + pk_ser[3][0] = 0x0b; + pk_ser[4][0] = 0x0c; + for (i = 0; i < 5; i++) { + CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(ctx, &pk[i], pk_ser[i])); + } + + permute(pk_order, 1); + test_xonly_sort_helper(pk, pk_order, 1); + permute(pk_order, 2); + test_xonly_sort_helper(pk, pk_order, 2); + permute(pk_order, 3); + test_xonly_sort_helper(pk, pk_order, 3); + for (i = 0; i < count; i++) { + permute(pk_order, 4); + test_xonly_sort_helper(pk, pk_order, 4); + } + for (i = 0; i < count; i++) { + permute(pk_order, 5); + test_xonly_sort_helper(pk, pk_order, 5); + } + /* Check that sorting also works for random pubkeys */ + for (i = 0; i < count; i++) { + int j; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk_ptr[5]; + for (j = 0; j < 5; j++) { + rand_xonly_pk(&pk[j]); + pk_ptr[j] = &pk[j]; + } + rustsecp256k1zkp_v0_4_0_xonly_sort(ctx, pk_ptr, 5); + for (j = 1; j < 5; j++) { + CHECK(rustsecp256k1zkp_v0_4_0_xonly_sort_cmp(&pk_ptr[j - 1], &pk_ptr[j], ctx) <= 0); + } + } +} + void run_extrakeys_tests(void) { /* xonly key test cases */ test_xonly_pubkey(); test_xonly_pubkey_tweak(); test_xonly_pubkey_tweak_check(); test_xonly_pubkey_tweak_recursive(); + test_xonly_pubkey_comparison(); /* keypair tests */ test_keypair(); test_keypair_add(); + + test_hsort(); + test_xonly_sort_api(); + test_xonly_sort(); } #endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/Makefile.am.include 
b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/Makefile.am.include index 7546ca52..0a5440f3 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/Makefile.am.include +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/Makefile.am.include @@ -1,5 +1,9 @@ include_HEADERS += include/rustsecp256k1zkp_v0_4_0_musig.h noinst_HEADERS += src/modules/musig/main_impl.h +noinst_HEADERS += src/modules/musig/keyagg_impl.h +noinst_HEADERS += src/modules/musig/session.h +noinst_HEADERS += src/modules/musig/session_impl.h +noinst_HEADERS += src/modules/musig/adaptor_impl.h noinst_HEADERS += src/modules/musig/tests_impl.h noinst_PROGRAMS += example_musig diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/adaptor_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/adaptor_impl.h new file mode 100644 index 00000000..448f6b12 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/adaptor_impl.h @@ -0,0 +1,84 @@ +/********************************************************************** + * Copyright (c) 2021 Jonas Nick * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_MODULE_MUSIG_ADAPTOR_IMPL_ +#define _SECP256K1_MODULE_MUSIG_ADAPTOR_IMPL_ + +#include "session.h" + +int rustsecp256k1zkp_v0_4_0_musig_nonce_parity(const rustsecp256k1zkp_v0_4_0_context* ctx, int *nonce_parity, rustsecp256k1zkp_v0_4_0_musig_session *session) { + rustsecp256k1zkp_v0_4_0_musig_session_internal session_i; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(nonce_parity != NULL); + ARG_CHECK(session != NULL); + + if (!rustsecp256k1zkp_v0_4_0_musig_session_load(ctx, &session_i, session)) { + return 0; + } + *nonce_parity = session_i.fin_nonce_parity; + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_adapt(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char 
*sig64, const unsigned char *sec_adaptor32, int nonce_parity) { + rustsecp256k1zkp_v0_4_0_scalar s; + rustsecp256k1zkp_v0_4_0_scalar t; + int overflow; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(sig64 != NULL); + ARG_CHECK(sec_adaptor32 != NULL); + + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&s, &sig64[32], &overflow); + if (overflow) { + return 0; + } + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&t, sec_adaptor32, &overflow); + if (overflow) { + rustsecp256k1zkp_v0_4_0_scalar_clear(&t); + return 0; + } + + if (nonce_parity) { + rustsecp256k1zkp_v0_4_0_scalar_negate(&t, &t); + } + + rustsecp256k1zkp_v0_4_0_scalar_add(&s, &s, &t); + rustsecp256k1zkp_v0_4_0_scalar_get_b32(&sig64[32], &s); + rustsecp256k1zkp_v0_4_0_scalar_clear(&t); + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_extract_adaptor(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *sec_adaptor32, const unsigned char *sig64, const unsigned char *pre_sig64, int nonce_parity) { + rustsecp256k1zkp_v0_4_0_scalar t; + rustsecp256k1zkp_v0_4_0_scalar s; + int overflow; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(sec_adaptor32 != NULL); + ARG_CHECK(sig64 != NULL); + ARG_CHECK(pre_sig64 != NULL); + + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&t, &sig64[32], &overflow); + if (overflow) { + return 0; + } + rustsecp256k1zkp_v0_4_0_scalar_negate(&t, &t); + + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&s, &pre_sig64[32], &overflow); + if (overflow) { + return 0; + } + rustsecp256k1zkp_v0_4_0_scalar_add(&t, &t, &s); + + if (!nonce_parity) { + rustsecp256k1zkp_v0_4_0_scalar_negate(&t, &t); + } + rustsecp256k1zkp_v0_4_0_scalar_get_b32(sec_adaptor32, &t); + rustsecp256k1zkp_v0_4_0_scalar_clear(&t); + return 1; +} + +#endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/example.c b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/example.c index 3f129282..21c1f87c 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/example.c +++ 
b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/example.c @@ -15,122 +15,129 @@ #include #include +struct signer_secrets_t { + rustsecp256k1zkp_v0_4_0_keypair keypair; + rustsecp256k1zkp_v0_4_0_musig_secnonce secnonce; +}; + +struct signer_t { + rustsecp256k1zkp_v0_4_0_xonly_pubkey pubkey; + rustsecp256k1zkp_v0_4_0_musig_pubnonce pubnonce; + rustsecp256k1zkp_v0_4_0_musig_partial_sig partial_sig; +}; + /* Number of public keys involved in creating the aggregate signature */ #define N_SIGNERS 3 /* Create a key pair and store it in seckey and pubkey */ -int create_keypair(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *seckey, rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkey) { +int create_keypair(const rustsecp256k1zkp_v0_4_0_context* ctx, struct signer_secrets_t *signer_secrets, struct signer_t *signer) { int ret; - rustsecp256k1zkp_v0_4_0_keypair keypair; + unsigned char seckey[32]; FILE *frand = fopen("/dev/urandom", "r"); if (frand == NULL) { return 0; } do { - if(!fread(seckey, 32, 1, frand)) { + if(!fread(seckey, sizeof(seckey), 1, frand)) { fclose(frand); return 0; } /* The probability that this not a valid secret key is approximately 2^-128 */ } while (!rustsecp256k1zkp_v0_4_0_ec_seckey_verify(ctx, seckey)); fclose(frand); - ret = rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &keypair, seckey); - ret &= rustsecp256k1zkp_v0_4_0_keypair_xonly_pub(ctx, pubkey, NULL, &keypair); + ret = rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &signer_secrets->keypair, seckey); + ret &= rustsecp256k1zkp_v0_4_0_keypair_xonly_pub(ctx, &signer->pubkey, NULL, &signer_secrets->keypair); return ret; } /* Sign a message hash with the given key pairs and store the result in sig */ -int sign(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char seckeys[][32], const rustsecp256k1zkp_v0_4_0_xonly_pubkey* pubkeys, const unsigned char* msg32, unsigned char *sig64) { - rustsecp256k1zkp_v0_4_0_musig_session musig_session[N_SIGNERS]; - unsigned char 
nonce_commitment[N_SIGNERS][32]; - const unsigned char *nonce_commitment_ptr[N_SIGNERS]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signer_data[N_SIGNERS][N_SIGNERS]; - unsigned char nonce[N_SIGNERS][32]; - int i, j; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig[N_SIGNERS]; +int sign(const rustsecp256k1zkp_v0_4_0_context* ctx, struct signer_secrets_t *signer_secrets, struct signer_t *signer, const unsigned char* msg32, unsigned char *sig64) { + int i; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkeys[N_SIGNERS]; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonces[N_SIGNERS]; + rustsecp256k1zkp_v0_4_0_musig_aggnonce agg_pubnonce; + const rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sigs[N_SIGNERS]; + /* The same for all signers */ + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache cache; + rustsecp256k1zkp_v0_4_0_musig_session session; for (i = 0; i < N_SIGNERS; i++) { FILE *frand; - unsigned char session_id32[32]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey combined_pk; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session; - - /* Create combined pubkey and initialize signer data */ - if (!rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(ctx, NULL, &combined_pk, &pre_session, pubkeys, N_SIGNERS)) { - return 0; - } + unsigned char seckey[32]; + unsigned char session_id[32]; /* Create random session ID. It is absolutely necessary that the session ID - * is unique for every call of rustsecp256k1zkp_v0_4_0_musig_session_init. Otherwise + * is unique for every call of rustsecp256k1zkp_v0_4_0_musig_nonce_gen. Otherwise * it's trivial for an attacker to extract the secret key! 
*/ frand = fopen("/dev/urandom", "r"); if(frand == NULL) { return 0; } - if (!fread(session_id32, 32, 1, frand)) { + if (!fread(session_id, 32, 1, frand)) { fclose(frand); return 0; } fclose(frand); - /* Initialize session */ - if (!rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &musig_session[i], signer_data[i], nonce_commitment[i], session_id32, msg32, &combined_pk, &pre_session, N_SIGNERS, i, seckeys[i])) { + if (!rustsecp256k1zkp_v0_4_0_keypair_sec(ctx, seckey, &signer_secrets[i].keypair)) { return 0; } - nonce_commitment_ptr[i] = &nonce_commitment[i][0]; - } - /* Communication round 1: Exchange nonce commitments */ - for (i = 0; i < N_SIGNERS; i++) { - /* Set nonce commitments in the signer data and get the own public nonce */ - if (!rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &musig_session[i], signer_data[i], nonce[i], nonce_commitment_ptr, N_SIGNERS, NULL)) { + /* Initialize session and create secret nonce for signing and public + * nonce to send to the other signers. */ + if (!rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &signer_secrets[i].secnonce, &signer[i].pubnonce, session_id, seckey, msg32, NULL, NULL)) { return 0; } + pubkeys[i] = &signer[i].pubkey; + pubnonces[i] = &signer[i].pubnonce; } - /* Communication round 2: Exchange nonces */ + /* Communication round 1: Exchange nonces */ for (i = 0; i < N_SIGNERS; i++) { - for (j = 0; j < N_SIGNERS; j++) { - if (!rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signer_data[i][j], nonce[j])) { - /* Signer j's nonce does not match the nonce commitment. In this case - * abort the protocol. If you make another attempt at finishing the - * protocol, create a new session (with a fresh session ID!). 
*/ - return 0; - } + rustsecp256k1zkp_v0_4_0_xonly_pubkey agg_pk; + + /* Create aggregate pubkey, aggregate nonce and initialize signer data */ + if (!rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(ctx, NULL, &agg_pk, &cache, pubkeys, N_SIGNERS)) { + return 0; } - if (!rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &musig_session[i], signer_data[i], N_SIGNERS, NULL, NULL)) { + if(!rustsecp256k1zkp_v0_4_0_musig_nonce_agg(ctx, &agg_pubnonce, pubnonces, N_SIGNERS)) { return 0; } - } - for (i = 0; i < N_SIGNERS; i++) { - if (!rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &musig_session[i], &partial_sig[i])) { + if(!rustsecp256k1zkp_v0_4_0_musig_nonce_process(ctx, &session, &agg_pubnonce, msg32, &cache, NULL)) { return 0; } + /* partial_sign will clear the secnonce by setting it 0. That's because + * you must _never_ reuse the secnonce (or use the same session_id to + * create a secnonce). If you do, you effectively reuse the nonce and + * leak the secret key. */ + if (!rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &signer[i].partial_sig, &signer_secrets[i].secnonce, &signer_secrets[i].keypair, &cache, &session)) { + return 0; + } + partial_sigs[i] = &signer[i].partial_sig; } - /* Communication round 3: Exchange partial signatures */ + /* Communication round 2: Exchange partial signatures */ for (i = 0; i < N_SIGNERS; i++) { - for (j = 0; j < N_SIGNERS; j++) { - /* To check whether signing was successful, it suffices to either verify - * the combined signature with the combined public key using - * rustsecp256k1zkp_v0_4_0_schnorrsig_verify, or verify all partial signatures of all - * signers individually. Verifying the combined signature is cheaper but - * verifying the individual partial signatures has the advantage that it - * can be used to determine which of the partial signatures are invalid - * (if any), i.e., which of the partial signatures cause the combined - * signature to be invalid and thus the protocol run to fail. 
It's also - * fine to first verify the combined sig, and only verify the individual - * sigs if it does not work. - */ - if (!rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &musig_session[i], &signer_data[i][j], &partial_sig[j], &pubkeys[j])) { - return 0; - } + /* To check whether signing was successful, it suffices to either verify + * the aggregate signature with the aggregate public key using + * rustsecp256k1zkp_v0_4_0_schnorrsig_verify, or verify all partial signatures of all + * signers individually. Verifying the aggregate signature is cheaper but + * verifying the individual partial signatures has the advantage that it + * can be used to determine which of the partial signatures are invalid + * (if any), i.e., which of the partial signatures cause the aggregate + * signature to be invalid and thus the protocol run to fail. It's also + * fine to first verify the aggregate sig, and only verify the individual + * sigs if it does not work. + */ + if (!rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &signer[i].partial_sig, &signer[i].pubnonce, &signer[i].pubkey, &cache, &session)) { + return 0; } } - return rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(ctx, &musig_session[0], sig64, partial_sig, N_SIGNERS); + return rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(ctx, sig64, &session, partial_sigs, N_SIGNERS); } int main(void) { rustsecp256k1zkp_v0_4_0_context* ctx; int i; - unsigned char seckeys[N_SIGNERS][32]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey pubkeys[N_SIGNERS]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey combined_pk; + struct signer_secrets_t signer_secrets[N_SIGNERS]; + struct signer_t signers[N_SIGNERS]; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkeys_ptr[N_SIGNERS]; + rustsecp256k1zkp_v0_4_0_xonly_pubkey agg_pk; unsigned char msg[32] = "this_could_be_the_hash_of_a_msg!"; unsigned char sig[64]; @@ -138,26 +145,27 @@ int sign(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char seckeys[][32] ctx = 
rustsecp256k1zkp_v0_4_0_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY); printf("Creating key pairs......"); for (i = 0; i < N_SIGNERS; i++) { - if (!create_keypair(ctx, seckeys[i], &pubkeys[i])) { + if (!create_keypair(ctx, &signer_secrets[i], &signers[i])) { printf("FAILED\n"); return 1; } + pubkeys_ptr[i] = &signers[i].pubkey; } printf("ok\n"); printf("Combining public keys..."); - if (!rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(ctx, NULL, &combined_pk, NULL, pubkeys, N_SIGNERS)) { + if (!rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(ctx, NULL, &agg_pk, NULL, pubkeys_ptr, N_SIGNERS)) { printf("FAILED\n"); return 1; } printf("ok\n"); printf("Signing message........."); - if (!sign(ctx, seckeys, pubkeys, msg, sig)) { + if (!sign(ctx, signer_secrets, signers, msg, sig)) { printf("FAILED\n"); return 1; } printf("ok\n"); printf("Verifying signature....."); - if (!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg, &combined_pk)) { + if (!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg, 32, &agg_pk)) { printf("FAILED\n"); return 1; } @@ -165,4 +173,3 @@ int sign(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char seckeys[][32] rustsecp256k1zkp_v0_4_0_context_destroy(ctx); return 0; } - diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/keyagg.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/keyagg.h new file mode 100644 index 00000000..d85b53c1 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/keyagg.h @@ -0,0 +1,28 @@ +/*********************************************************************** + * Copyright (c) 2021 Jonas Nick * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + ***********************************************************************/ + +#ifndef SECP256K1_MODULE_MUSIG_KEYAGG_H +#define SECP256K1_MODULE_MUSIG_KEYAGG_H + +typedef struct { + rustsecp256k1zkp_v0_4_0_ge pk; + 
rustsecp256k1zkp_v0_4_0_fe second_pk_x; + const unsigned char *pk_hash; + int is_tweaked; + rustsecp256k1zkp_v0_4_0_scalar tweak; + int internal_key_parity; +} rustsecp256k1zkp_v0_4_0_keyagg_cache_internal; + +/* Requires that the saved point is not infinity */ +static void rustsecp256k1zkp_v0_4_0_point_save(unsigned char *data, rustsecp256k1zkp_v0_4_0_ge *ge); + +static void rustsecp256k1zkp_v0_4_0_point_load(rustsecp256k1zkp_v0_4_0_ge *ge, const unsigned char *data); + +static int rustsecp256k1zkp_v0_4_0_keyagg_cache_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_keyagg_cache_internal *cache_i, const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *cache); + +static void rustsecp256k1zkp_v0_4_0_musig_keyaggcoef(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_keyagg_cache_internal *cache_i, rustsecp256k1zkp_v0_4_0_fe *x); + +#endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/keyagg_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/keyagg_impl.h new file mode 100644 index 00000000..9f2db7ca --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/keyagg_impl.h @@ -0,0 +1,273 @@ +/*********************************************************************** + * Copyright (c) 2021 Jonas Nick * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or https://www.opensource.org/licenses/mit-license.php.* + ***********************************************************************/ + +#ifndef SECP256K1_MODULE_MUSIG_KEYAGG_IMPL +#define SECP256K1_MODULE_MUSIG_KEYAGG_IMPL + +#include "keyagg.h" + + +static void rustsecp256k1zkp_v0_4_0_point_save(unsigned char *data, rustsecp256k1zkp_v0_4_0_ge *ge) { + if (sizeof(rustsecp256k1zkp_v0_4_0_ge_storage) == 64) { + rustsecp256k1zkp_v0_4_0_ge_storage s; + rustsecp256k1zkp_v0_4_0_ge_to_storage(&s, ge); + memcpy(data, &s, sizeof(s)); + } else { + VERIFY_CHECK(!rustsecp256k1zkp_v0_4_0_ge_is_infinity(ge)); + 
rustsecp256k1zkp_v0_4_0_fe_normalize_var(&ge->x); + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&ge->y); + rustsecp256k1zkp_v0_4_0_fe_get_b32(data, &ge->x); + rustsecp256k1zkp_v0_4_0_fe_get_b32(data + 32, &ge->y); + } +} + +static void rustsecp256k1zkp_v0_4_0_point_load(rustsecp256k1zkp_v0_4_0_ge *ge, const unsigned char *data) { + if (sizeof(rustsecp256k1zkp_v0_4_0_ge_storage) == 64) { + /* When the rustsecp256k1zkp_v0_4_0_ge_storage type is exactly 64 byte, use its + * representation as conversion is very fast. */ + rustsecp256k1zkp_v0_4_0_ge_storage s; + memcpy(&s, data, sizeof(s)); + rustsecp256k1zkp_v0_4_0_ge_from_storage(ge, &s); + } else { + /* Otherwise, fall back to 32-byte big endian for X and Y. */ + rustsecp256k1zkp_v0_4_0_fe x, y; + rustsecp256k1zkp_v0_4_0_fe_set_b32(&x, data); + rustsecp256k1zkp_v0_4_0_fe_set_b32(&y, data + 32); + rustsecp256k1zkp_v0_4_0_ge_set_xy(ge, &x, &y); + } +} + +static const unsigned char rustsecp256k1zkp_v0_4_0_musig_keyagg_cache_magic[4] = { 0xf4, 0xad, 0xbb, 0xdf }; + +/* A keyagg cache consists of + * - 4 byte magic set during initialization to allow detecting an uninitialized + * object. 
+ * - 64 byte aggregate (and potentially tweaked) public key + * - 32 byte X-coordinate of "second" public key (0 if not present) + * - 32 byte hash of all public keys + * - 1 byte indicating if the public key is tweaked and if so, also the parity + * of the internal key + * - 32 byte tweak + */ +/* Requires that cache_i->pk is not infinity */ +static void rustsecp256k1zkp_v0_4_0_keyagg_cache_save(rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *cache, rustsecp256k1zkp_v0_4_0_keyagg_cache_internal *cache_i) { + unsigned char *ptr = cache->data; + memcpy(ptr, rustsecp256k1zkp_v0_4_0_musig_keyagg_cache_magic, 4); + ptr += 4; + rustsecp256k1zkp_v0_4_0_point_save(ptr, &cache_i->pk); + ptr += 64; + rustsecp256k1zkp_v0_4_0_fe_get_b32(ptr, &cache_i->second_pk_x); + ptr += 32; + memmove(ptr, cache_i->pk_hash, 32); + ptr += 32; + *ptr = cache_i->is_tweaked; + *ptr |= cache_i->internal_key_parity << 1; + ptr += 1; + rustsecp256k1zkp_v0_4_0_scalar_get_b32(ptr, &cache_i->tweak); +} + +static int rustsecp256k1zkp_v0_4_0_keyagg_cache_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_keyagg_cache_internal *cache_i, const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *cache) { + const unsigned char *ptr = cache->data; + ARG_CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(ptr, rustsecp256k1zkp_v0_4_0_musig_keyagg_cache_magic, 4) == 0); + ptr += 4; + rustsecp256k1zkp_v0_4_0_point_load(&cache_i->pk, ptr); + ptr += 64; + rustsecp256k1zkp_v0_4_0_fe_set_b32(&cache_i->second_pk_x, ptr); + ptr += 32; + cache_i->pk_hash = ptr; + ptr += 32; + cache_i->is_tweaked = *ptr & 1; + cache_i->internal_key_parity = *ptr & 2; + ptr += 1; + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&cache_i->tweak, ptr, NULL); + return 1; +} + +/* Initializes SHA256 with fixed midstate. This midstate was computed by applying + * SHA256 to SHA256("KeyAgg list")||SHA256("KeyAgg list"). 
*/ +static void rustsecp256k1zkp_v0_4_0_musig_keyagglist_sha256(rustsecp256k1zkp_v0_4_0_sha256 *sha) { + rustsecp256k1zkp_v0_4_0_sha256_initialize(sha); + + sha->s[0] = 0xb399d5e0ul; + sha->s[1] = 0xc8fff302ul; + sha->s[2] = 0x6badac71ul; + sha->s[3] = 0x07c5b7f1ul; + sha->s[4] = 0x9701e2eful; + sha->s[5] = 0x2a72ecf8ul; + sha->s[6] = 0x201a4c7bul; + sha->s[7] = 0xab148a38ul; + sha->bytes = 64; +} + +/* Computes pk_hash = SHA256(pk[0], ..., pk[np-1]) */ +static int rustsecp256k1zkp_v0_4_0_musig_compute_pk_hash(const rustsecp256k1zkp_v0_4_0_context *ctx, unsigned char *pk_hash, const rustsecp256k1zkp_v0_4_0_xonly_pubkey * const* pk, size_t np) { + rustsecp256k1zkp_v0_4_0_sha256 sha; + size_t i; + + rustsecp256k1zkp_v0_4_0_musig_keyagglist_sha256(&sha); + for (i = 0; i < np; i++) { + unsigned char ser[32]; + if (!rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, ser, pk[i])) { + return 0; + } + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, ser, 32); + } + rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, pk_hash); + return 1; +} + +/* Initializes SHA256 with fixed midstate. This midstate was computed by applying + * SHA256 to SHA256("KeyAgg coefficient")||SHA256("KeyAgg coefficient"). */ +static void rustsecp256k1zkp_v0_4_0_musig_keyaggcoef_sha256(rustsecp256k1zkp_v0_4_0_sha256 *sha) { + rustsecp256k1zkp_v0_4_0_sha256_initialize(sha); + + sha->s[0] = 0x6ef02c5aul; + sha->s[1] = 0x06a480deul; + sha->s[2] = 0x1f298665ul; + sha->s[3] = 0x1d1134f2ul; + sha->s[4] = 0x56a0b063ul; + sha->s[5] = 0x52da4147ul; + sha->s[6] = 0xf280d9d4ul; + sha->s[7] = 0x4484be15ul; + sha->bytes = 64; +} + +/* Compute KeyAgg coefficient which is constant 1 for the second pubkey and + * SHA256(pk_hash, x) where pk_hash is the hash of public keys otherwise. second_pk_x + * can be 0 in case there is no second_pk. Assumes both field elements x and + * second_pk_x are normalized. 
*/ +static void rustsecp256k1zkp_v0_4_0_musig_keyaggcoef_internal(rustsecp256k1zkp_v0_4_0_scalar *r, const unsigned char *pk_hash, const rustsecp256k1zkp_v0_4_0_fe *x, const rustsecp256k1zkp_v0_4_0_fe *second_pk_x) { + rustsecp256k1zkp_v0_4_0_sha256 sha; + unsigned char buf[32]; + + if (rustsecp256k1zkp_v0_4_0_fe_cmp_var(x, second_pk_x) == 0) { + rustsecp256k1zkp_v0_4_0_scalar_set_int(r, 1); + } else { + rustsecp256k1zkp_v0_4_0_musig_keyaggcoef_sha256(&sha); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, pk_hash, 32); + rustsecp256k1zkp_v0_4_0_fe_get_b32(buf, x); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, buf, 32); + rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, buf); + rustsecp256k1zkp_v0_4_0_scalar_set_b32(r, buf, NULL); + } + +} + +/* Assumes both field elements x and second_pk_x are normalized. */ +static void rustsecp256k1zkp_v0_4_0_musig_keyaggcoef(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_keyagg_cache_internal *cache_i, rustsecp256k1zkp_v0_4_0_fe *x) { + rustsecp256k1zkp_v0_4_0_musig_keyaggcoef_internal(r, cache_i->pk_hash, x, &cache_i->second_pk_x); +} + +typedef struct { + const rustsecp256k1zkp_v0_4_0_context *ctx; + /* pk_hash is the hash of the public keys */ + unsigned char pk_hash[32]; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey * const* pks; + rustsecp256k1zkp_v0_4_0_fe second_pk_x; +} rustsecp256k1zkp_v0_4_0_musig_pubkey_agg_ecmult_data; + +/* Callback for batch EC multiplication to compute pk_hash_0*P0 + pk_hash_1*P1 + ... 
*/ +static int rustsecp256k1zkp_v0_4_0_musig_pubkey_agg_callback(rustsecp256k1zkp_v0_4_0_scalar *sc, rustsecp256k1zkp_v0_4_0_ge *pt, size_t idx, void *data) { + rustsecp256k1zkp_v0_4_0_musig_pubkey_agg_ecmult_data *ctx = (rustsecp256k1zkp_v0_4_0_musig_pubkey_agg_ecmult_data *) data; + int ret; + ret = rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx->ctx, pt, ctx->pks[idx]); + /* pubkey_load can't fail because the same pks have already been loaded (and + * we test this) */ + VERIFY_CHECK(ret); + rustsecp256k1zkp_v0_4_0_musig_keyaggcoef_internal(sc, ctx->pk_hash, &pt->x, &ctx->second_pk_x); + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_scratch_space *scratch, rustsecp256k1zkp_v0_4_0_xonly_pubkey *agg_pk, rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, const rustsecp256k1zkp_v0_4_0_xonly_pubkey * const* pubkeys, size_t n_pubkeys) { + rustsecp256k1zkp_v0_4_0_musig_pubkey_agg_ecmult_data ecmult_data; + rustsecp256k1zkp_v0_4_0_gej pkj; + rustsecp256k1zkp_v0_4_0_ge pkp; + size_t i; + + VERIFY_CHECK(ctx != NULL); + if (agg_pk != NULL) { + memset(agg_pk, 0, sizeof(*agg_pk)); + } + ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(pubkeys != NULL); + ARG_CHECK(n_pubkeys > 0); + + ecmult_data.ctx = ctx; + ecmult_data.pks = pubkeys; + /* No point on the curve has an X coordinate equal to 0 */ + rustsecp256k1zkp_v0_4_0_fe_set_int(&ecmult_data.second_pk_x, 0); + for (i = 1; i < n_pubkeys; i++) { + rustsecp256k1zkp_v0_4_0_ge pt; + if (!rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx, &pt, pubkeys[i])) { + return 0; + } + if (rustsecp256k1zkp_v0_4_0_memcmp_var(pubkeys[0], pubkeys[i], sizeof(*pubkeys[0])) != 0) { + ecmult_data.second_pk_x = pt.x; + break; + } + } + + if (!rustsecp256k1zkp_v0_4_0_musig_compute_pk_hash(ctx, ecmult_data.pk_hash, pubkeys, n_pubkeys)) { + return 0; + } + if (!rustsecp256k1zkp_v0_4_0_ecmult_multi_var(&ctx->error_callback, 
&ctx->ecmult_ctx, scratch, &pkj, NULL, rustsecp256k1zkp_v0_4_0_musig_pubkey_agg_callback, (void *) &ecmult_data, n_pubkeys)) { + /* The current implementation of ecmult_multi_var makes this code unreachable with tests. */ + return 0; + } + rustsecp256k1zkp_v0_4_0_ge_set_gej(&pkp, &pkj); + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&pkp.y); + /* The resulting public key is infinity with negligible probability */ + VERIFY_CHECK(!rustsecp256k1zkp_v0_4_0_ge_is_infinity(&pkp)); + if (keyagg_cache != NULL) { + rustsecp256k1zkp_v0_4_0_keyagg_cache_internal cache_i = { 0 }; + cache_i.pk = pkp; + cache_i.second_pk_x = ecmult_data.second_pk_x; + cache_i.pk_hash = ecmult_data.pk_hash; + rustsecp256k1zkp_v0_4_0_keyagg_cache_save(keyagg_cache, &cache_i); + } + + rustsecp256k1zkp_v0_4_0_extrakeys_ge_even_y(&pkp); + if (agg_pk != NULL) { + rustsecp256k1zkp_v0_4_0_xonly_pubkey_save(agg_pk, &pkp); + } + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_pubkey *output_pubkey, const unsigned char *tweak32, rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache) { + rustsecp256k1zkp_v0_4_0_keyagg_cache_internal cache_i; + int overflow = 0; + + VERIFY_CHECK(ctx != NULL); + if (output_pubkey != NULL) { + memset(output_pubkey, 0, sizeof(*output_pubkey)); + } + ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(keyagg_cache != NULL); + ARG_CHECK(tweak32 != NULL); + + if(!rustsecp256k1zkp_v0_4_0_keyagg_cache_load(ctx, &cache_i, keyagg_cache)) { + return 0; + } + /* This function can only be called once because otherwise signing would not + * succeed */ + ARG_CHECK(cache_i.is_tweaked == 0); + + cache_i.internal_key_parity = rustsecp256k1zkp_v0_4_0_extrakeys_ge_even_y(&cache_i.pk); + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&cache_i.tweak, tweak32, &overflow); + if(overflow || !rustsecp256k1zkp_v0_4_0_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &cache_i.pk, &cache_i.tweak)) 
{ + return 0; + } + cache_i.is_tweaked = 1; + /* eckey_pubkey_tweak_add fails if cache_i.pk is infinity */ + VERIFY_CHECK(!rustsecp256k1zkp_v0_4_0_ge_is_infinity(&cache_i.pk)); + rustsecp256k1zkp_v0_4_0_keyagg_cache_save(keyagg_cache, &cache_i); + if (output_pubkey != NULL) { + rustsecp256k1zkp_v0_4_0_pubkey_save(output_pubkey, &cache_i.pk); + } + return 1; +} + +#endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/main_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/main_impl.h index 432e6440..914d7d55 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/main_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/main_impl.h @@ -11,692 +11,8 @@ #include "include/secp256k1.h" #include "include/secp256k1_musig.h" #include "hash.h" - -/* Computes ell = SHA256(pk[0], ..., pk[np-1]) */ -static int rustsecp256k1zkp_v0_4_0_musig_compute_ell(const rustsecp256k1zkp_v0_4_0_context *ctx, unsigned char *ell, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk, size_t np) { - rustsecp256k1zkp_v0_4_0_sha256 sha; - size_t i; - - rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); - for (i = 0; i < np; i++) { - unsigned char ser[32]; - if (!rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, ser, &pk[i])) { - return 0; - } - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, ser, 32); - } - rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, ell); - return 1; -} - -/* Initializes SHA256 with fixed midstate. This midstate was computed by applying - * SHA256 to SHA256("MuSig coefficient")||SHA256("MuSig coefficient"). 
*/ -static void rustsecp256k1zkp_v0_4_0_musig_sha256_init_tagged(rustsecp256k1zkp_v0_4_0_sha256 *sha) { - rustsecp256k1zkp_v0_4_0_sha256_initialize(sha); - - sha->s[0] = 0x0fd0690cul; - sha->s[1] = 0xfefeae97ul; - sha->s[2] = 0x996eac7ful; - sha->s[3] = 0x5c30d864ul; - sha->s[4] = 0x8c4a0573ul; - sha->s[5] = 0xaca1a22ful; - sha->s[6] = 0x6f43b801ul; - sha->s[7] = 0x85ce27cdul; - sha->bytes = 64; -} - -/* Compute r = SHA256(ell, idx). The four bytes of idx are serialized least significant byte first. */ -static void rustsecp256k1zkp_v0_4_0_musig_coefficient(rustsecp256k1zkp_v0_4_0_scalar *r, const unsigned char *ell, uint32_t idx) { - rustsecp256k1zkp_v0_4_0_sha256 sha; - unsigned char buf[32]; - size_t i; - - rustsecp256k1zkp_v0_4_0_musig_sha256_init_tagged(&sha); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, ell, 32); - /* We're hashing the index of the signer instead of its public key as specified - * in the MuSig paper. This reduces the total amount of data that needs to be - * hashed. - * Additionally, it prevents creating identical musig_coefficients for identical - * public keys. A participant Bob could choose his public key to be the same as - * Alice's, then replay Alice's messages (nonce and partial signature) to create - * a valid partial signature. This is not a problem for MuSig per se, but could - * result in subtle issues with protocols building on threshold signatures. - * With the assumption that public keys are unique, hashing the index is - * equivalent to hashing the public key. 
Because the public key can be - * identified by the index given the ordered list of public keys (included in - * ell), the index is just a different encoding of the public key.*/ - for (i = 0; i < sizeof(uint32_t); i++) { - unsigned char c = idx; - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, &c, 1); - idx >>= 8; - } - rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, buf); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(r, buf, NULL); -} - -typedef struct { - const rustsecp256k1zkp_v0_4_0_context *ctx; - unsigned char ell[32]; - const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pks; -} rustsecp256k1zkp_v0_4_0_musig_pubkey_combine_ecmult_data; - -/* Callback for batch EC multiplication to compute ell_0*P0 + ell_1*P1 + ... */ -static int rustsecp256k1zkp_v0_4_0_musig_pubkey_combine_callback(rustsecp256k1zkp_v0_4_0_scalar *sc, rustsecp256k1zkp_v0_4_0_ge *pt, size_t idx, void *data) { - rustsecp256k1zkp_v0_4_0_musig_pubkey_combine_ecmult_data *ctx = (rustsecp256k1zkp_v0_4_0_musig_pubkey_combine_ecmult_data *) data; - rustsecp256k1zkp_v0_4_0_musig_coefficient(sc, ctx->ell, idx); - return rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx->ctx, pt, &ctx->pks[idx]); -} - -static void rustsecp256k1zkp_v0_4_0_musig_signers_init(rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers, uint32_t n_signers) { - uint32_t i; - for (i = 0; i < n_signers; i++) { - memset(&signers[i], 0, sizeof(signers[i])); - signers[i].index = i; - signers[i].present = 0; - } -} - -static const uint64_t pre_session_magic = 0xf4adbbdf7c7dd304UL; - -int rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_scratch_space *scratch, rustsecp256k1zkp_v0_4_0_xonly_pubkey *combined_pk, rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkeys, size_t n_pubkeys) { - rustsecp256k1zkp_v0_4_0_musig_pubkey_combine_ecmult_data ecmult_data; - rustsecp256k1zkp_v0_4_0_gej pkj; - rustsecp256k1zkp_v0_4_0_ge pkp; - int pk_parity; 
- - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(combined_pk != NULL); - ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(pubkeys != NULL); - ARG_CHECK(n_pubkeys > 0); - - ecmult_data.ctx = ctx; - ecmult_data.pks = pubkeys; - if (!rustsecp256k1zkp_v0_4_0_musig_compute_ell(ctx, ecmult_data.ell, pubkeys, n_pubkeys)) { - return 0; - } - if (!rustsecp256k1zkp_v0_4_0_ecmult_multi_var(&ctx->error_callback, &ctx->ecmult_ctx, scratch, &pkj, NULL, rustsecp256k1zkp_v0_4_0_musig_pubkey_combine_callback, (void *) &ecmult_data, n_pubkeys)) { - return 0; - } - rustsecp256k1zkp_v0_4_0_ge_set_gej(&pkp, &pkj); - rustsecp256k1zkp_v0_4_0_fe_normalize(&pkp.y); - pk_parity = rustsecp256k1zkp_v0_4_0_extrakeys_ge_even_y(&pkp); - rustsecp256k1zkp_v0_4_0_xonly_pubkey_save(combined_pk, &pkp); - - if (pre_session != NULL) { - pre_session->magic = pre_session_magic; - memcpy(pre_session->pk_hash, ecmult_data.ell, 32); - pre_session->pk_parity = pk_parity; - pre_session->is_tweaked = 0; - } - return 1; -} - -int rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, rustsecp256k1zkp_v0_4_0_pubkey *output_pubkey, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *internal_pubkey, const unsigned char *tweak32) { - rustsecp256k1zkp_v0_4_0_ge pk; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(pre_session != NULL); - ARG_CHECK(pre_session->magic == pre_session_magic); - /* This function can only be called once because otherwise signing would not - * succeed */ - ARG_CHECK(pre_session->is_tweaked == 0); - - pre_session->internal_key_parity = pre_session->pk_parity; - if(!rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_add(ctx, output_pubkey, internal_pubkey, tweak32)) { - return 0; - } - - memcpy(pre_session->tweak, tweak32, 32); - pre_session->is_tweaked = 1; - - if (!rustsecp256k1zkp_v0_4_0_pubkey_load(ctx, &pk, output_pubkey)) { - return 0; - } - pre_session->pk_parity = 
rustsecp256k1zkp_v0_4_0_extrakeys_ge_even_y(&pk); - return 1; -} - -static const uint64_t session_magic = 0xd92e6fc1ee41b4cbUL; - -int rustsecp256k1zkp_v0_4_0_musig_session_init(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_session *session, rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers, unsigned char *nonce_commitment32, const unsigned char *session_id32, const unsigned char *msg32, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *combined_pk, const rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, size_t n_signers, size_t my_index, const unsigned char *seckey) { - unsigned char combined_ser[32]; - int overflow; - rustsecp256k1zkp_v0_4_0_scalar secret; - rustsecp256k1zkp_v0_4_0_scalar mu; - rustsecp256k1zkp_v0_4_0_sha256 sha; - rustsecp256k1zkp_v0_4_0_gej pj; - rustsecp256k1zkp_v0_4_0_ge p; - unsigned char nonce_ser[32]; - size_t nonce_ser_size = sizeof(nonce_ser); - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); - ARG_CHECK(session != NULL); - ARG_CHECK(signers != NULL); - ARG_CHECK(nonce_commitment32 != NULL); - ARG_CHECK(session_id32 != NULL); - ARG_CHECK(combined_pk != NULL); - ARG_CHECK(pre_session != NULL); - ARG_CHECK(pre_session->magic == pre_session_magic); - ARG_CHECK(seckey != NULL); - - ARG_CHECK(n_signers > 0); - ARG_CHECK(n_signers <= UINT32_MAX); - ARG_CHECK(my_index < n_signers); - - memset(session, 0, sizeof(*session)); - - session->magic = session_magic; - if (msg32 != NULL) { - memcpy(session->msg, msg32, 32); - session->is_msg_set = 1; - } else { - session->is_msg_set = 0; - } - memcpy(&session->combined_pk, combined_pk, sizeof(*combined_pk)); - session->pre_session = *pre_session; - session->has_secret_data = 1; - session->n_signers = (uint32_t) n_signers; - rustsecp256k1zkp_v0_4_0_musig_signers_init(signers, session->n_signers); - - /* Compute secret key */ - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&secret, seckey, &overflow); - if 
(overflow) { - rustsecp256k1zkp_v0_4_0_scalar_clear(&secret); - return 0; - } - rustsecp256k1zkp_v0_4_0_musig_coefficient(&mu, session->pre_session.pk_hash, (uint32_t) my_index); - /* Compute the signer's public key point and determine if the secret is - * negated before signing. That happens if if the signer's pubkey has an odd - * Y coordinate XOR the MuSig-combined pubkey has an odd Y coordinate XOR - * (if tweaked) the internal key has an odd Y coordinate. - * - * This can be seen by looking at the secret key belonging to `combined_pk`. - * Let's define - * P' := mu_0*|P_0| + ... + mu_n*|P_n| where P_i is the i-th public key - * point x_i*G, mu_i is the i-th musig coefficient and |.| is a function - * that normalizes a point to an even Y by negating if necessary similar to - * rustsecp256k1zkp_v0_4_0_extrakeys_ge_even_y. Then we have - * P := |P'| + t*G where t is the tweak. - * And the combined xonly public key is - * |P| = x*G - * where x = sum_i(b_i*mu_i*x_i) + b'*t - * b' = -1 if P != |P|, 1 otherwise - * b_i = -1 if (P_i != |P_i| XOR P' != |P'| XOR P != |P|) and 1 - * otherwise. 
- */ - rustsecp256k1zkp_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &secret); - rustsecp256k1zkp_v0_4_0_ge_set_gej(&p, &pj); - rustsecp256k1zkp_v0_4_0_fe_normalize(&p.y); - if((rustsecp256k1zkp_v0_4_0_fe_is_odd(&p.y) - + session->pre_session.pk_parity - + (session->pre_session.is_tweaked - && session->pre_session.internal_key_parity)) - % 2 == 1) { - rustsecp256k1zkp_v0_4_0_scalar_negate(&secret, &secret); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(&secret, &secret, &mu); - rustsecp256k1zkp_v0_4_0_scalar_get_b32(session->seckey, &secret); - - /* Compute secret nonce */ - rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, session_id32, 32); - if (session->is_msg_set) { - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, msg32, 32); - } - rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, combined_ser, combined_pk); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, combined_ser, 32); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, seckey, 32); - rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, session->secnonce); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&secret, session->secnonce, &overflow); - if (overflow) { - rustsecp256k1zkp_v0_4_0_scalar_clear(&secret); - return 0; - } - - /* Compute public nonce and commitment */ - rustsecp256k1zkp_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &secret); - rustsecp256k1zkp_v0_4_0_ge_set_gej(&p, &pj); - rustsecp256k1zkp_v0_4_0_fe_normalize_var(&p.y); - session->partial_nonce_parity = rustsecp256k1zkp_v0_4_0_extrakeys_ge_even_y(&p); - rustsecp256k1zkp_v0_4_0_xonly_pubkey_save(&session->nonce, &p); - - rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); - rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, nonce_ser, &session->nonce); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, nonce_ser, nonce_ser_size); - rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, nonce_commitment32); - - session->round = 0; - rustsecp256k1zkp_v0_4_0_scalar_clear(&secret); - return 1; -} - -int 
rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_session *session, rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers, unsigned char *nonce, const unsigned char *const *commitments, size_t n_commitments, const unsigned char *msg32) { - rustsecp256k1zkp_v0_4_0_sha256 sha; - unsigned char nonce_commitments_hash[32]; - size_t i; - unsigned char nonce_ser[32]; - size_t nonce_ser_size = sizeof(nonce_ser); - (void) ctx; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(session != NULL); - ARG_CHECK(session->magic == session_magic); - ARG_CHECK(signers != NULL); - ARG_CHECK(nonce != NULL); - ARG_CHECK(commitments != NULL); - - ARG_CHECK(session->round == 0); - /* If the message was not set during initialization it must be set now. */ - ARG_CHECK(!(!session->is_msg_set && msg32 == NULL)); - /* The message can only be set once. */ - ARG_CHECK(!(session->is_msg_set && msg32 != NULL)); - ARG_CHECK(session->has_secret_data); - ARG_CHECK(n_commitments == session->n_signers); - for (i = 0; i < n_commitments; i++) { - ARG_CHECK(commitments[i] != NULL); - } - - if (msg32 != NULL) { - memcpy(session->msg, msg32, 32); - session->is_msg_set = 1; - } - rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); - for (i = 0; i < n_commitments; i++) { - memcpy(signers[i].nonce_commitment, commitments[i], 32); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, commitments[i], 32); - } - rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, nonce_commitments_hash); - memcpy(session->nonce_commitments_hash, nonce_commitments_hash, 32); - - rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, nonce_ser, &session->nonce); - memcpy(nonce, &nonce_ser, nonce_ser_size); - session->round = 1; - return 1; -} - -int rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_session *session, rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers, const unsigned char *msg32, 
const rustsecp256k1zkp_v0_4_0_xonly_pubkey *combined_pk, const rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, const unsigned char *const *commitments, size_t n_signers) { - size_t i; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(session != NULL); - ARG_CHECK(signers != NULL); - ARG_CHECK(msg32 != NULL); - ARG_CHECK(combined_pk != NULL); - ARG_CHECK(pre_session != NULL); - ARG_CHECK(pre_session->magic == pre_session_magic); - ARG_CHECK(commitments != NULL); - /* Check n_signers before checking commitments to allow testing the case where - * n_signers is big without allocating the space. */ - ARG_CHECK(n_signers > 0); - ARG_CHECK(n_signers <= UINT32_MAX); - for (i = 0; i < n_signers; i++) { - ARG_CHECK(commitments[i] != NULL); - } - (void) ctx; - - memset(session, 0, sizeof(*session)); - - session->magic = session_magic; - memcpy(&session->combined_pk, combined_pk, sizeof(*combined_pk)); - session->pre_session = *pre_session; - session->n_signers = (uint32_t) n_signers; - rustsecp256k1zkp_v0_4_0_musig_signers_init(signers, session->n_signers); - - session->pre_session = *pre_session; - session->is_msg_set = 1; - memcpy(session->msg, msg32, 32); - session->has_secret_data = 0; - - for (i = 0; i < n_signers; i++) { - memcpy(signers[i].nonce_commitment, commitments[i], 32); - } - session->round = 1; - return 1; -} - -int rustsecp256k1zkp_v0_4_0_musig_set_nonce(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signer, const unsigned char *nonce) { - rustsecp256k1zkp_v0_4_0_sha256 sha; - unsigned char commit[32]; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(signer != NULL); - ARG_CHECK(nonce != NULL); - - rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, nonce, 32); - rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, commit); - - if (memcmp(commit, signer->nonce_commitment, 32) != 0) { - return 0; - } - memcpy(&signer->nonce, nonce, sizeof(*nonce)); - if 
(!rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(ctx, &signer->nonce, nonce)) { - return 0; - } - signer->present = 1; - return 1; -} - -int rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_session *session, const rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers, size_t n_signers, int *nonce_parity, const rustsecp256k1zkp_v0_4_0_pubkey *adaptor) { - rustsecp256k1zkp_v0_4_0_gej combined_noncej; - rustsecp256k1zkp_v0_4_0_ge combined_noncep; - rustsecp256k1zkp_v0_4_0_ge noncep; - rustsecp256k1zkp_v0_4_0_sha256 sha; - unsigned char nonce_commitments_hash[32]; - size_t i; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(session != NULL); - ARG_CHECK(signers != NULL); - ARG_CHECK(session->magic == session_magic); - ARG_CHECK(session->round == 1); - ARG_CHECK(n_signers == session->n_signers); - - rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); - rustsecp256k1zkp_v0_4_0_gej_set_infinity(&combined_noncej); - for (i = 0; i < n_signers; i++) { - if (!signers[i].present) { - return 0; - } - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, signers[i].nonce_commitment, 32); - rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx, &noncep, &signers[i].nonce); - rustsecp256k1zkp_v0_4_0_gej_add_ge_var(&combined_noncej, &combined_noncej, &noncep, NULL); - } - rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, nonce_commitments_hash); - /* If the signers' commitments changed between get_public_nonce and now we - * have to abort because in that case they may have seen our nonce before - * creating their commitment. That can happen if the signer_data given to - * this function is different to the signer_data given to get_public_nonce. 
- * */ - if (session->has_secret_data - && memcmp(session->nonce_commitments_hash, nonce_commitments_hash, 32) != 0) { - return 0; - } - - /* Add public adaptor to nonce */ - if (adaptor != NULL) { - rustsecp256k1zkp_v0_4_0_pubkey_load(ctx, &noncep, adaptor); - rustsecp256k1zkp_v0_4_0_gej_add_ge_var(&combined_noncej, &combined_noncej, &noncep, NULL); - } - - /* Negate nonce if Y coordinate is not square */ - rustsecp256k1zkp_v0_4_0_ge_set_gej(&combined_noncep, &combined_noncej); - rustsecp256k1zkp_v0_4_0_fe_normalize_var(&combined_noncep.y); - session->combined_nonce_parity = rustsecp256k1zkp_v0_4_0_extrakeys_ge_even_y(&combined_noncep); - if (nonce_parity != NULL) { - *nonce_parity = session->combined_nonce_parity; - } - rustsecp256k1zkp_v0_4_0_xonly_pubkey_save(&session->combined_nonce, &combined_noncep); - session->round = 2; - return 1; -} - -int rustsecp256k1zkp_v0_4_0_musig_partial_signature_serialize(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *out32, const rustsecp256k1zkp_v0_4_0_musig_partial_signature* sig) { - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(out32 != NULL); - ARG_CHECK(sig != NULL); - memcpy(out32, sig->data, 32); - return 1; -} - -int rustsecp256k1zkp_v0_4_0_musig_partial_signature_parse(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_partial_signature* sig, const unsigned char *in32) { - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(sig != NULL); - ARG_CHECK(in32 != NULL); - memcpy(sig->data, in32, 32); - return 1; -} - -/* Compute msghash = SHA256(combined_nonce, combined_pk, msg) */ -static void rustsecp256k1zkp_v0_4_0_musig_compute_messagehash(const rustsecp256k1zkp_v0_4_0_context *ctx, unsigned char *msghash, const rustsecp256k1zkp_v0_4_0_musig_session *session) { - unsigned char buf[32]; - rustsecp256k1zkp_v0_4_0_ge rp; - rustsecp256k1zkp_v0_4_0_sha256 sha; - - VERIFY_CHECK(session->round >= 2); - - rustsecp256k1zkp_v0_4_0_schnorrsig_sha256_tagged(&sha); - rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx, &rp, 
&session->combined_nonce); - rustsecp256k1zkp_v0_4_0_fe_get_b32(buf, &rp.x); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, buf, 32); - - rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, buf, &session->combined_pk); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, buf, 32); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, session->msg, 32); - rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, msghash); -} - -int rustsecp256k1zkp_v0_4_0_musig_partial_sign(const rustsecp256k1zkp_v0_4_0_context* ctx, const rustsecp256k1zkp_v0_4_0_musig_session *session, rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sig) { - unsigned char msghash[32]; - int overflow; - rustsecp256k1zkp_v0_4_0_scalar sk; - rustsecp256k1zkp_v0_4_0_scalar e, k; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(partial_sig != NULL); - ARG_CHECK(session != NULL); - ARG_CHECK(session->magic == session_magic); - ARG_CHECK(session->round == 2); - ARG_CHECK(session->has_secret_data); - - /* build message hash */ - rustsecp256k1zkp_v0_4_0_musig_compute_messagehash(ctx, msghash, session); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&e, msghash, NULL); - - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&sk, session->seckey, &overflow); - if (overflow) { - rustsecp256k1zkp_v0_4_0_scalar_clear(&sk); - return 0; - } - - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&k, session->secnonce, &overflow); - if (overflow || rustsecp256k1zkp_v0_4_0_scalar_is_zero(&k)) { - rustsecp256k1zkp_v0_4_0_scalar_clear(&sk); - rustsecp256k1zkp_v0_4_0_scalar_clear(&k); - return 0; - } - if (session->partial_nonce_parity != session->combined_nonce_parity) { - rustsecp256k1zkp_v0_4_0_scalar_negate(&k, &k); - } - - /* Sign */ - rustsecp256k1zkp_v0_4_0_scalar_mul(&e, &e, &sk); - rustsecp256k1zkp_v0_4_0_scalar_add(&e, &e, &k); - rustsecp256k1zkp_v0_4_0_scalar_get_b32(&partial_sig->data[0], &e); - rustsecp256k1zkp_v0_4_0_scalar_clear(&sk); - rustsecp256k1zkp_v0_4_0_scalar_clear(&k); - - return 1; -} - -int rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(const 
rustsecp256k1zkp_v0_4_0_context* ctx, const rustsecp256k1zkp_v0_4_0_musig_session *session, unsigned char *sig64, const rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sigs, size_t n_sigs) { - size_t i; - rustsecp256k1zkp_v0_4_0_scalar s; - rustsecp256k1zkp_v0_4_0_ge noncep; - (void) ctx; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(partial_sigs != NULL); - ARG_CHECK(session != NULL); - ARG_CHECK(session->magic == session_magic); - ARG_CHECK(session->round == 2); - - if (n_sigs != session->n_signers) { - return 0; - } - rustsecp256k1zkp_v0_4_0_scalar_clear(&s); - for (i = 0; i < n_sigs; i++) { - int overflow; - rustsecp256k1zkp_v0_4_0_scalar term; - - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&term, partial_sigs[i].data, &overflow); - if (overflow) { - return 0; - } - rustsecp256k1zkp_v0_4_0_scalar_add(&s, &s, &term); - } - - /* If there is a tweak then add (or subtract) `msghash` times `tweak` to `s`.*/ - if (session->pre_session.is_tweaked) { - unsigned char msghash[32]; - rustsecp256k1zkp_v0_4_0_scalar e, scalar_tweak; - int overflow = 0; - - rustsecp256k1zkp_v0_4_0_musig_compute_messagehash(ctx, msghash, session); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&e, msghash, NULL); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&scalar_tweak, session->pre_session.tweak, &overflow); - if (overflow || !rustsecp256k1zkp_v0_4_0_eckey_privkey_tweak_mul(&e, &scalar_tweak)) { - /* This mimics the behavior of rustsecp256k1zkp_v0_4_0_ec_seckey_tweak_mul regarding - * overflow and tweak being 0. 
*/ - return 0; - } - if (session->pre_session.pk_parity) { - rustsecp256k1zkp_v0_4_0_scalar_negate(&e, &e); - } - rustsecp256k1zkp_v0_4_0_scalar_add(&s, &s, &e); - } - - rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx, &noncep, &session->combined_nonce); - VERIFY_CHECK(!rustsecp256k1zkp_v0_4_0_fe_is_odd(&noncep.y)); - rustsecp256k1zkp_v0_4_0_fe_normalize(&noncep.x); - rustsecp256k1zkp_v0_4_0_fe_get_b32(&sig64[0], &noncep.x); - rustsecp256k1zkp_v0_4_0_scalar_get_b32(&sig64[32], &s); - - return 1; -} - -int rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(const rustsecp256k1zkp_v0_4_0_context* ctx, const rustsecp256k1zkp_v0_4_0_musig_session *session, const rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signer, const rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sig, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkey) { - unsigned char msghash[32]; - rustsecp256k1zkp_v0_4_0_scalar s; - rustsecp256k1zkp_v0_4_0_scalar e; - rustsecp256k1zkp_v0_4_0_scalar mu; - rustsecp256k1zkp_v0_4_0_gej pkj; - rustsecp256k1zkp_v0_4_0_gej rj; - rustsecp256k1zkp_v0_4_0_ge pkp; - rustsecp256k1zkp_v0_4_0_ge rp; - int overflow; - - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); - ARG_CHECK(session != NULL); - ARG_CHECK(signer != NULL); - ARG_CHECK(partial_sig != NULL); - ARG_CHECK(pubkey != NULL); - ARG_CHECK(session->magic == session_magic); - ARG_CHECK(session->round == 2); - ARG_CHECK(signer->present); - - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&s, partial_sig->data, &overflow); - if (overflow) { - return 0; - } - rustsecp256k1zkp_v0_4_0_musig_compute_messagehash(ctx, msghash, session); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&e, msghash, NULL); - - /* Multiplying the messagehash by the musig coefficient is equivalent - * to multiplying the signer's public key by the coefficient, except - * much easier to do. 
*/ - rustsecp256k1zkp_v0_4_0_musig_coefficient(&mu, session->pre_session.pk_hash, signer->index); - rustsecp256k1zkp_v0_4_0_scalar_mul(&e, &e, &mu); - - if (!rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx, &rp, &signer->nonce)) { - return 0; - } - - /* If the MuSig-combined point has an odd Y coordinate, the signers will - * sign for the negation of their individual xonly public key such that the - * combined signature is valid for the MuSig aggregated xonly key. If the - * MuSig-combined point was tweaked then `e` is negated if the combined key - * has an odd Y coordinate XOR the internal key has an odd Y coordinate.*/ - if (session->pre_session.pk_parity - != (session->pre_session.is_tweaked - && session->pre_session.internal_key_parity)) { - rustsecp256k1zkp_v0_4_0_scalar_negate(&e, &e); - } - - /* Compute rj = s*G + (-e)*pkj */ - rustsecp256k1zkp_v0_4_0_scalar_negate(&e, &e); - if (!rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx, &pkp, pubkey)) { - return 0; - } - rustsecp256k1zkp_v0_4_0_gej_set_ge(&pkj, &pkp); - rustsecp256k1zkp_v0_4_0_ecmult(&ctx->ecmult_ctx, &rj, &pkj, &e, &s); - - if (!session->combined_nonce_parity) { - rustsecp256k1zkp_v0_4_0_ge_neg(&rp, &rp); - } - rustsecp256k1zkp_v0_4_0_gej_add_ge_var(&rj, &rj, &rp, NULL); - - return rustsecp256k1zkp_v0_4_0_gej_is_infinity(&rj); -} - -int rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_partial_signature *adaptor_sig, const rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sig, const unsigned char *sec_adaptor32, int nonce_parity) { - rustsecp256k1zkp_v0_4_0_scalar s; - rustsecp256k1zkp_v0_4_0_scalar t; - int overflow; - - (void) ctx; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(adaptor_sig != NULL); - ARG_CHECK(partial_sig != NULL); - ARG_CHECK(sec_adaptor32 != NULL); - - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&s, partial_sig->data, &overflow); - if (overflow) { - return 0; - } - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&t, 
sec_adaptor32, &overflow); - if (overflow) { - rustsecp256k1zkp_v0_4_0_scalar_clear(&t); - return 0; - } - - if (nonce_parity) { - rustsecp256k1zkp_v0_4_0_scalar_negate(&t, &t); - } - - rustsecp256k1zkp_v0_4_0_scalar_add(&s, &s, &t); - rustsecp256k1zkp_v0_4_0_scalar_get_b32(adaptor_sig->data, &s); - rustsecp256k1zkp_v0_4_0_scalar_clear(&t); - return 1; -} - -int rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *sec_adaptor32, const unsigned char *sig64, const rustsecp256k1zkp_v0_4_0_musig_partial_signature *partial_sigs, size_t n_partial_sigs, int nonce_parity) { - rustsecp256k1zkp_v0_4_0_scalar t; - rustsecp256k1zkp_v0_4_0_scalar s; - int overflow; - size_t i; - - (void) ctx; - VERIFY_CHECK(ctx != NULL); - ARG_CHECK(sec_adaptor32 != NULL); - ARG_CHECK(sig64 != NULL); - ARG_CHECK(partial_sigs != NULL); - - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&t, &sig64[32], &overflow); - if (overflow) { - return 0; - } - rustsecp256k1zkp_v0_4_0_scalar_negate(&t, &t); - - for (i = 0; i < n_partial_sigs; i++) { - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&s, partial_sigs[i].data, &overflow); - if (overflow) { - rustsecp256k1zkp_v0_4_0_scalar_clear(&t); - return 0; - } - rustsecp256k1zkp_v0_4_0_scalar_add(&t, &t, &s); - } - - if (!nonce_parity) { - rustsecp256k1zkp_v0_4_0_scalar_negate(&t, &t); - } - rustsecp256k1zkp_v0_4_0_scalar_get_b32(sec_adaptor32, &t); - rustsecp256k1zkp_v0_4_0_scalar_clear(&t); - return 1; -} +#include "keyagg_impl.h" +#include "session_impl.h" +#include "adaptor_impl.h" #endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/musig-spec.mediawiki b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/musig-spec.mediawiki new file mode 100644 index 00000000..64fa4811 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/musig-spec.mediawiki @@ -0,0 +1,102 @@ +
+  Title: MuSig Key Aggregation
+  Author:
+  Status: Draft
+  License: BSD-2-Clause
+  Created: 2020-01-19
+
+ +== Introduction == + +=== Abstract === + +This document describes MuSig Key Aggregation in libsecp256k1-zkp. + +=== Copyright === + +This document is licensed under the 2-clause BSD license. + +=== Motivation === + +== Description == + +=== Design === + +* A function for sorting public keys allows to aggregate keys independent of the (initial) order. +* The KeyAgg coefficient is computed by hashing the key instead of key index. Otherwise, if the pubkey list gets sorted, the signer needs to translate between key indices pre- and post-sorting. +* The second unique key in the pubkey list gets the constant KeyAgg coefficient 1 which saves an exponentiation (see the MuSig2* appendix in the [https://eprint.iacr.org/2020/1261 MuSig2 paper]). + + +=== Specification === + +The following conventions are used, with constants as defined for [https://www.secg.org/sec2-v2.pdf secp256k1]. We note that adapting this specification to other elliptic curves is not straightforward and can result in an insecure schemeAmong other pitfalls, using the specification with a curve whose order is not close to the size of the range of the nonce derivation function is insecure.. +* Lowercase variables represent integers or byte arrays. +** The constant ''p'' refers to the field size, ''0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F''. +** The constant ''n'' refers to the curve order, ''0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141''. +* Uppercase variables refer to points on the curve with equation ''y2 = x3 + 7'' over the integers modulo ''p''. +** ''is_infinite(P)'' returns whether or not ''P'' is the point at infinity. +** ''x(P)'' and ''y(P)'' are integers in the range ''0..p-1'' and refer to the X and Y coordinates of a point ''P'' (assuming it is not infinity). 
+** The constant ''G'' refers to the base point, for which ''x(G) = 0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798'' and ''y(G) = 0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8''. +** Addition of points refers to the usual [https://en.wikipedia.org/wiki/Elliptic_curve#The_group_law elliptic curve group operation]. +** [https://en.wikipedia.org/wiki/Elliptic_curve_point_multiplication Multiplication (ā‹…) of an integer and a point] refers to the repeated application of the group operation. +* Functions and operations: +** ''||'' refers to byte array concatenation. +** The function ''x[i:j]'', where ''x'' is a byte array and ''i, j ≥ 0'', returns a ''(j - i)''-byte array with a copy of the ''i''-th byte (inclusive) to the ''j''-th byte (exclusive) of ''x''. +** The function ''bytes(x)'', where ''x'' is an integer, returns the 32-byte encoding of ''x'', most significant byte first. +** The function ''bytes(P)'', where ''P'' is a point, returns ''bytes(x(P))''. +** The function ''int(x)'', where ''x'' is a 32-byte array, returns the 256-bit unsigned integer whose most significant byte first encoding is ''x''. +** The function ''has_even_y(P)'', where ''P'' is a point for which ''not is_infinite(P)'', returns ''y(P) mod 2 = 0''. +** The function ''lift_x(x)'', where ''x'' is an integer in range ''0..p-1'', returns the point ''P'' for which ''x(P) = x'' + Given a candidate X coordinate ''x'' in the range ''0..p-1'', there exist either exactly two or exactly zero valid Y coordinates. If no valid Y coordinate exists, then ''x'' is not a valid X coordinate either, i.e., no point ''P'' exists for which ''x(P) = x''. The valid Y coordinates for a given candidate ''x'' are the square roots of ''c = x3 + 7 mod p'' and they can be computed as ''y = ±c(p+1)/4 mod p'' (see [https://en.wikipedia.org/wiki/Quadratic_residue#Prime_or_prime_power_modulus Quadratic residue]) if they exist, which can be checked by squaring and comparing with ''c''. 
and ''has_even_y(P)'', or fails if no such point exists. The function ''lift_x(x)'' is equivalent to the following pseudocode: +*** Let ''c = x3 + 7 mod p''. +*** Let ''y = c(p+1)/4 mod p''. +*** Fail if ''c ≠ y2 mod p''. +*** Return the unique point ''P'' such that ''x(P) = x'' and ''y(P) = y'' if ''y mod 2 = 0'' or ''y(P) = p-y'' otherwise. +** The function ''hashtag(x)'' where ''tag'' is a UTF-8 encoded tag name and ''x'' is a byte array returns the 32-byte hash ''SHA256(SHA256(tag) || SHA256(tag) || x)''. + + +==== Key Sorting ==== + +Input: +* The number ''u'' of public keys with ''0 < u < 2^32'' +* The public keys ''pk1..u'': ''u'' 32-byte arrays + +The algorithm ''KeySort(pk1..u)'' is defined as: +* Return ''pk1..u'' sorted in lexicographical order. + +==== Key Aggregation ==== + +Input: +* The number ''u'' of public keys with ''0 < u < 2^32'' +* The public keys ''pk1..u'': ''u'' 32-byte arrays + +The algorithm ''KeyAgg(pk1..u)'' is defined as: +* For ''i = 1 .. u'': +** Let ''ai = KeyAggCoeff(pk1..u, i)''. +** Let ''Pi = lift_x(int(pki))''; fail if it fails. +* Let ''S = a1ā‹…P1 + a2ā‹…P2 + ... + auā‹…Pu'' +* Fail if ''is_infinite(S)''. +* Return ''bytes(S)''. + +The algorithm ''HashKeys(pk1..u)'' is defined as: +* Return ''hashKeyAgg list(pk1 || pk2 || ... || pku)'' + +The algorithm ''IsSecond(pk1..u, i)'' is defined as: +* For ''j = 1 .. u'': +** If ''pkj ≠ pk1'': +*** Return ''true'' if ''pkj = pki'', otherwise return ''false''. +* Return ''false'' + +The algorithm ''KeyAggCoeff(pk1..u, i)'' is defined as: +* Let ''L = HashKeys(pk1..u)''. +* Return 1 if ''IsSecond(pk1..u, i)'', otherwise return ''int(hashKeyAgg coefficient(L || pki)) mod n''. 
+ +== Applications == + +== Test Vectors and Reference Code == + +== Footnotes == + + + +== Acknowledgements == diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/musig.md b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/musig.md index 475681b5..8477f499 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/musig.md +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/musig.md @@ -1,198 +1,58 @@ -MuSig - Rogue-Key-Resistant Multisignatures Module +Notes on the musig module API =========================== -This module implements the MuSig [1] multisignature scheme. The majority of -the module is an API designed to be used by signing or auditing participants -in a multisignature scheme. This involves a somewhat complex state machine -and significant effort has been taken to prevent accidental misuse of the -API in ways that could lead to accidental signatures or loss of key material. +The following sections describe use of our API. A usage example can be found in `src/modules/musig/example.c`. -The resulting signatures are valid Schnorr signatures as described in [2]. +# API misuse -# Theory +The musig API is designed to be misuse resistant. +However, the MuSig protocol has some additional failure modes (mainly due to interactivity) that do not appear in single-signing. +While the results can be catastrophic (e.g. leaking of the secret key), it is unfortunately not possible for the musig implementation to rule out all such failure modes. -In MuSig all signers contribute key material to a single signing key, -using the equation +Therefore, users of the musig module must take great care to make sure of the following: - P = sum_i µ_i * P_i +1. A unique nonce per signing session is generated in `rustsecp256k1zkp_v0_4_0_musig_nonce_gen`. + See the corresponding comment in `include/rustsecp256k1zkp_v0_4_0_musig.h` for how to ensure that. +2. The `rustsecp256k1zkp_v0_4_0_musig_secnonce` structure is never copied or serialized. 
+ See also the comment on `rustsecp256k1zkp_v0_4_0_musig_secnonce` in `include/rustsecp256k1zkp_v0_4_0_musig.h`. +3. Opaque data structures are never written to or read from directly. + Instead, only the provided accessor functions are used. +4. If adaptor signatures are used, all partial signatures are verified. -where `P_i` is the public key of the `i`th signer and `µ_i` is a so-called -_MuSig coefficient_ computed according to the following equation +# Key Aggregation and (Taproot) Tweaking - L = H(P_1 || P_2 || ... || P_n) - µ_i = H(L || i) +Given a set of public keys, the aggregate public key is computed with `rustsecp256k1zkp_v0_4_0_musig_pubkey_agg`. +A (Taproot) tweak can be added to the resulting public key with `rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_add`. -where H is a hash function modelled as a random oracle. +# Signing -To produce a multisignature `(s, R)` on a message `m` using verification key -`P`, signers act as follows: +This is covered by `src/modules/musig/example.c`. +Essentially, the protocol proceeds in the following steps: -1. Each computes a nonce, or ephemeral keypair, `(k_i, R_i)`. Every signer - communicates `H(R_i)` to every participant (both signers and auditors). -2. Upon receipt of every `H(R_i)`, each signer communicates `R_i` to every - participant. The recipients check that each `R_i` is consistent with the - previously-communicated hash. -3. Each signer computes a combined nonce - `R = sum_i R_i` - and shared challenge - `e = H(R || P || m)` - and partial signature - `s_i = k_i + µ_i*x_i*e` - where `x_i` is the secret key corresponding to `P_i`. +1. Generate a keypair with `rustsecp256k1zkp_v0_4_0_keypair_create` and obtain the xonly public key with `rustsecp256k1zkp_v0_4_0_keypair_xonly_pub`. +2. Call `rustsecp256k1zkp_v0_4_0_musig_pubkey_agg` with the xonly pubkeys of all participants. +3. Optionally add a (Taproot) tweak with `rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add`. +4. 
Generate a pair of secret and public nonce with `rustsecp256k1zkp_v0_4_0_musig_nonce_gen` and send the public nonce to the other signers. +5. Someone (not necessarily the signer) aggregates the public nonce with `rustsecp256k1zkp_v0_4_0_musig_nonce_agg` and sends it to the signers. +6. Process the aggregate nonce with `rustsecp256k1zkp_v0_4_0_musig_nonce_process`. +7. Create a partial signature with `rustsecp256k1zkp_v0_4_0_musig_partial_sign`. +8. Verify the partial signatures (optional in some scenarios) with `rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify`. +9. Someone (not necessarily the signer) obtains all partial signatures and aggregates them into the final Schnorr signature using `rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg`. -The complete signature is then the `(s, R)` where `s = sum_i s_i` and `R = sum_i R_i`. +The aggregate signature can be verified with `rustsecp256k1zkp_v0_4_0_schnorrsig_verify`. +Note that steps 1 to 3 can happen after generating the nonce in step 4, but this disables some of the defense-in-depth measures. -# API Usage +# Verification -The following sections describe use of our API, and are mirrored in code in `src/modules/musig/example.c`. +A participant who wants to verify the partial signatures, but does not sign itself may do so using the above instructions except that the verifier does _not_ generate a nonce with `rustsecp256k1zkp_v0_4_0_musig_nonce_gen`. -It is essential to security that signers use a unique uniformly random nonce for all -signing sessions, and that they do not reuse these nonces even in the case that a -signing session fails to complete. To that end, all signing state is encapsulated -in the data structure `rustsecp256k1zkp_v0_4_0_musig_session`. The API does not expose any -functionality to serialize or deserialize this structure; it is designed to exist -only in memory. - -Users who need to persist this structure must take additional security measures -which cannot be enforced by a C API. 
Some guidance is provided in the documentation -for this data structure in `include/rustsecp256k1zkp_v0_4_0_musig.h`. - -## Key Generation - -To use MuSig, users must first compute their combined public key `P`, which is -suitable for use on a blockchain or other public key repository. They do this -by calling `rustsecp256k1zkp_v0_4_0_musig_pubkey_combine`. - -This function takes as input a list of public keys `P_i` in the argument -`pubkeys`. It outputs the combined public key `P` in the out-pointer `combined_pk` -and hash `L` in the out-pointer `pk_hash32`, if this pointer is non-NULL. - -## Signing - -A participant who wishes to sign a message (as opposed to observing/auditing the -signature process, which is also a supported mode) acts as follows. - -### Signing Participant - -1. The signer starts the session by calling `rustsecp256k1zkp_v0_4_0_musig_session_init`. - This function outputs - - an initialized session state in the out-pointer `session` - - an array of initialized signer data in the out-pointer `signers` - - a commitment `H(R_i)` to a nonce in the out-pointer `nonce_commitment32` - It takes as input - - a unique session ID `session_id32` - - (optionally) a message to be signed `msg32` - - the combined public key output from `rustsecp256k1zkp_v0_4_0_musig_pubkey_combine` - - the public key hash output from `rustsecp256k1zkp_v0_4_0_musig_pubkey_combine` - - the signer's index `i` `my_index` - - the signer's secret key `seckey` -2. The signer then communicates `H(R_i)` to all other signers, and receives - commitments `H(R_j)` from all other signers `j`. These hashes are simply - length-32 byte arrays which can be communicated however is communicated. -3. Once all signers nonce commitments have been received, the signer records - these commitments with the function `rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce`. - If the signer did not provide a message to `rustsecp256k1zkp_v0_4_0_musig_session_init`, - a message must be provided now. 
- This function updates in place - - the session state `session` - - the array of signer data `signers` - taking in as input the list of commitments `commitments` and outputting the - signer's public nonce `R_i` in the out-pointer `nonce`. -4. The signer then communicates `R_i` to all other signers, and receives `R_j` - from each signer `j`. On receipt of a nonce `R_j` he calls the function - `rustsecp256k1zkp_v0_4_0_musig_set_nonce` to record this fact. This function checks that - the received nonce is consistent with the previously-received nonce and will - return 0 in this case. The signer must also call this function with his own - nonce and his own index `i`. - These nonces `R_i` are secp256k1 public keys; they should be serialized using - `rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize` and parsed with `rustsecp256k1zkp_v0_4_0_ec_pubkey_parse`. -5. Once all nonces have been exchanged in this way, signers are able to compute - their partial signatures. They do so by calling `rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces` - which updates in place - - the session state `session` - - the array of signer data `signers` - It outputs an auxiliary integer `nonce_is_negated` and has an auxiliary input - `adaptor`. Both of these may be set to NULL for ordinary signing purposes. -6. The signer computes a partial signature `s_i` using the function - `rustsecp256k1zkp_v0_4_0_musig_partial_sign` which takes the session state as input and - partial signature as output. -7. The signer then communicates the partial signature `s_i` to all other signers, or - to a central coordinator. These partial signatures should be serialized using - `musig_partial_signature_serialize` and parsed using `musig_partial_signature_parse`. -8. Each signer calls `rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify` on the other signers' partial - signatures to verify their correctness. If only the validity of the final signature - is important, not assigning blame, this step can be skipped. -9. 
Any signer, or central coordinator, may combine the partial signatures to obtain - a complete signature using `rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine`. This function takes - a signing session and array of MuSig partial signatures, and outputs a single - Schnorr signature. - -### Non-signing Participant - -A participant who wants to verify the signing process, i.e. check that nonce commitments -are consistent and partial signatures are correct without contributing a partial signature, -may do so using the above instructions except for the following changes: - -1. A signing session should be produced using `musig_session_init_verifier` - rather than `musig_session_init`; this function takes no secret data or - signer index. -2. The participant receives nonce commitments, public nonces and partial signatures, - but does not produce these values. Therefore `rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce` - and `rustsecp256k1zkp_v0_4_0_musig_partial_sign` are not called. - -### Verifier - -The final signature is simply a valid Schnorr signature using the combined public key. It -can be verified using the `rustsecp256k1zkp_v0_4_0_schnorrsig_verify` with the correct message and -public key output from `rustsecp256k1zkp_v0_4_0_musig_pubkey_combine`. - -## Atomic Swaps +# Atomic Swaps The signing API supports the production of "adaptor signatures", modified partial signatures which are offset by an auxiliary secret known to one party. That is, 1. One party generates a (secret) adaptor `t` with corresponding (public) adaptor `T = t*G`. -2. When combining nonces, each party adds `T` to the total nonce used in the signature. -3. The party who knows `t` must "adapt" their partial signature with `t` to complete the - signature. -4. Any party who sees both the final signature and the original partial signatures - can compute `t`. 
- -Using these adaptor signatures, two 2-of-2 MuSig signing protocols can be executed in -parallel such that one party's partial signatures are made atomic. That is, when the other -party learns one partial signature, she automatically learns the other. This has applications -in cross-chain atomic swaps. - -Such a protocol can be executed as follows. Consider two participants, Alice and Bob, who -are simultaneously producing 2-of-2 multisignatures for two blockchains A and B. They act -as follows. - -1. Before the protocol begins, Bob chooses a 32-byte auxiliary secret `t` at random and - computes a corresponding public point `T` by calling `rustsecp256k1zkp_v0_4_0_ec_pubkey_create`. - He communicates `T` to Alice. -2. Together, the parties execute steps 1-4 of the signing protocol above. -3. At step 5, when combining the two parties' public nonces, both parties call - `rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces` with `adaptor` set to `T` and `nonce_is_negated` - set to a non-NULL pointer to int. -4. Steps 6 and 7 proceed as before. Step 8, verifying the partial signatures, is now - essential to the security of the protocol and must not be omitted! - -The above steps are executed identically for both signing sessions. However, step 9 will -not work as before, since the partial signatures will not add up to a valid total signature. -Additional steps must be taken, and it is at this point that the two signing sessions -diverge. From here on we consider "Session A" which benefits Alice (e.g. which sends her -coins) and "Session B" which benefits Bob (e.g. which sends him coins). - -5. In Session B, Bob calls `rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt` with his partial signature - and `t`, to produce an adaptor signature. He can then call `rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine` - with this adaptor signature and Alice's partial signature, to produce a complete - signature for blockchain B. -6. Alice reads this signature from blockchain B. 
She calls `rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor`, - passing the complete signature along with her and Bob's partial signatures from Session B. - This function outputs `t`, which until this point was only known to Bob. -7. In Session A, Alice is now able to replicate Bob's action, calling - `rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt` with her own partial signature and `t`, ultimately - producing a complete signature on blockchain A. - -[1] https://eprint.iacr.org/2018/068 -[2] https://github.com/sipa/bips/blob/bip-schnorr/bip-schnorr.mediawiki - +2. When calling `rustsecp256k1zkp_v0_4_0_musig_nonce_process`, the public adaptor `T` is provided as the `adaptor` argument. +3. The party who is going to extract the secret adaptor `t` later must verify all partial signatures. +4. Due to step 2, the signature output of `rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg` is a pre-signature and not a valid Schnorr signature. All parties involved extract this session's `nonce_parity` with `rustsecp256k1zkp_v0_4_0_musig_nonce_parity`. +5. The party who knows `t` must "adapt" the pre-signature with `t` (and the `nonce_parity`) using `rustsecp256k1zkp_v0_4_0_musig_adapt` to complete the signature. +6. Any party who sees both the final signature and the pre-signature (and has the `nonce_parity`) can extract `t` with `rustsecp256k1zkp_v0_4_0_musig_extract_adaptor`. 
diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/session.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/session.h new file mode 100644 index 00000000..7b6c2f58 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/session.h @@ -0,0 +1,20 @@ +/********************************************************************** + * Copyright (c) 2021 Jonas Nick * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_MODULE_MUSIG_SESSION_ +#define _SECP256K1_MODULE_MUSIG_SESSION_ + +typedef struct { + int fin_nonce_parity; + const unsigned char *fin_nonce; + rustsecp256k1zkp_v0_4_0_scalar noncecoef; + rustsecp256k1zkp_v0_4_0_scalar challenge; + rustsecp256k1zkp_v0_4_0_scalar s_part; +} rustsecp256k1zkp_v0_4_0_musig_session_internal; + +static int rustsecp256k1zkp_v0_4_0_musig_session_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_session_internal *session_i, const rustsecp256k1zkp_v0_4_0_musig_session *session); + +#endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/session_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/session_impl.h new file mode 100644 index 00000000..7e727ae8 --- /dev/null +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/session_impl.h @@ -0,0 +1,620 @@ +/********************************************************************** + * Copyright (c) 2021 Jonas Nick * + * Distributed under the MIT software license, see the accompanying * + * file COPYING or http://www.opensource.org/licenses/mit-license.php.* + **********************************************************************/ + +#ifndef _SECP256K1_MODULE_MUSIG_SESSION_IMPL_ +#define _SECP256K1_MODULE_MUSIG_SESSION_IMPL_ + +#include "keyagg.h" +#include "session.h" + +static const unsigned char 
rustsecp256k1zkp_v0_4_0_musig_secnonce_magic[4] = { 0x22, 0x0e, 0xdc, 0xf1 }; + +static void rustsecp256k1zkp_v0_4_0_musig_secnonce_save(rustsecp256k1zkp_v0_4_0_musig_secnonce *secnonce, rustsecp256k1zkp_v0_4_0_scalar *k) { + memcpy(&secnonce->data[0], rustsecp256k1zkp_v0_4_0_musig_secnonce_magic, 4); + rustsecp256k1zkp_v0_4_0_scalar_get_b32(&secnonce->data[4], &k[0]); + rustsecp256k1zkp_v0_4_0_scalar_get_b32(&secnonce->data[36], &k[1]); +} + +static int rustsecp256k1zkp_v0_4_0_musig_secnonce_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_scalar *k, rustsecp256k1zkp_v0_4_0_musig_secnonce *secnonce) { + ARG_CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(&secnonce->data[0], rustsecp256k1zkp_v0_4_0_musig_secnonce_magic, 4) == 0); + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&k[0], &secnonce->data[4], NULL); + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&k[1], &secnonce->data[36], NULL); + return 1; +} + +static const unsigned char rustsecp256k1zkp_v0_4_0_musig_pubnonce_magic[4] = { 0xf5, 0x7a, 0x3d, 0xa0 }; + +/* Requires that none of the provided group elements is infinity. Works for both + * musig_pubnonce and musig_aggnonce. */ +static void rustsecp256k1zkp_v0_4_0_musig_pubnonce_save(rustsecp256k1zkp_v0_4_0_musig_pubnonce* nonce, rustsecp256k1zkp_v0_4_0_ge* ge) { + int i; + memcpy(&nonce->data[0], rustsecp256k1zkp_v0_4_0_musig_pubnonce_magic, 4); + for (i = 0; i < 2; i++) { + rustsecp256k1zkp_v0_4_0_point_save(nonce->data + 4+64*i, &ge[i]); + } +} + +/* Works for both musig_pubnonce and musig_aggnonce. 
Returns 1 unless the nonce + * wasn't properly initialized */ +static int rustsecp256k1zkp_v0_4_0_musig_pubnonce_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_ge* ge, const rustsecp256k1zkp_v0_4_0_musig_pubnonce* nonce) { + int i; + + ARG_CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(&nonce->data[0], rustsecp256k1zkp_v0_4_0_musig_pubnonce_magic, 4) == 0); + for (i = 0; i < 2; i++) { + rustsecp256k1zkp_v0_4_0_point_load(&ge[i], nonce->data + 4 + 64*i); + } + return 1; +} + +static const unsigned char rustsecp256k1zkp_v0_4_0_musig_session_cache_magic[4] = { 0x9d, 0xed, 0xe9, 0x17 }; + +/* A session consists of + * - 4 byte session cache + * - 1 byte the parity of the aggregate nonce + * - 32 byte aggregated nonce + * - 32 byte nonce aggregation coefficient b + * - 32 byte signature challenge hash e + * - 32 byte scalar s that is added to the partial signatures of the signers + */ +static void rustsecp256k1zkp_v0_4_0_musig_session_save(rustsecp256k1zkp_v0_4_0_musig_session *session, const rustsecp256k1zkp_v0_4_0_musig_session_internal *session_i) { + unsigned char *ptr = session->data; + + memcpy(ptr, rustsecp256k1zkp_v0_4_0_musig_session_cache_magic, 4); + ptr += 4; + *ptr = session_i->fin_nonce_parity; + ptr += 1; + memmove(ptr, session_i->fin_nonce, 32); + ptr += 32; + rustsecp256k1zkp_v0_4_0_scalar_get_b32(ptr, &session_i->noncecoef); + ptr += 32; + rustsecp256k1zkp_v0_4_0_scalar_get_b32(ptr, &session_i->challenge); + ptr += 32; + rustsecp256k1zkp_v0_4_0_scalar_get_b32(ptr, &session_i->s_part); +} + +static int rustsecp256k1zkp_v0_4_0_musig_session_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_session_internal *session_i, const rustsecp256k1zkp_v0_4_0_musig_session *session) { + const unsigned char *ptr = session->data; + + ARG_CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(ptr, rustsecp256k1zkp_v0_4_0_musig_session_cache_magic, 4) == 0); + ptr += 4; + session_i->fin_nonce_parity = *ptr; + ptr += 1; + 
session_i->fin_nonce = ptr; + ptr += 32; + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&session_i->noncecoef, ptr, NULL); + ptr += 32; + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&session_i->challenge, ptr, NULL); + ptr += 32; + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&session_i->s_part, ptr, NULL); + return 1; +} + +static const unsigned char rustsecp256k1zkp_v0_4_0_musig_partial_sig_magic[4] = { 0xeb, 0xfb, 0x1a, 0x32 }; + +static void rustsecp256k1zkp_v0_4_0_musig_partial_sig_save(rustsecp256k1zkp_v0_4_0_musig_partial_sig* sig, rustsecp256k1zkp_v0_4_0_scalar *s) { + memcpy(&sig->data[0], rustsecp256k1zkp_v0_4_0_musig_partial_sig_magic, 4); + rustsecp256k1zkp_v0_4_0_scalar_get_b32(&sig->data[4], s); +} + +static int rustsecp256k1zkp_v0_4_0_musig_partial_sig_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_scalar *s, const rustsecp256k1zkp_v0_4_0_musig_partial_sig* sig) { + int overflow; + + ARG_CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(&sig->data[0], rustsecp256k1zkp_v0_4_0_musig_partial_sig_magic, 4) == 0); + rustsecp256k1zkp_v0_4_0_scalar_set_b32(s, &sig->data[4], &overflow); + /* Parsed signatures can not overflow */ + VERIFY_CHECK(!overflow); + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_pubnonce_serialize(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *out66, const rustsecp256k1zkp_v0_4_0_musig_pubnonce* nonce) { + rustsecp256k1zkp_v0_4_0_ge ge[2]; + int i; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(out66 != NULL); + memset(out66, 0, 66); + ARG_CHECK(nonce != NULL); + + if (!rustsecp256k1zkp_v0_4_0_musig_pubnonce_load(ctx, ge, nonce)) { + return 0; + } + for (i = 0; i < 2; i++) { + int ret; + size_t size = 33; + ret = rustsecp256k1zkp_v0_4_0_eckey_pubkey_serialize(&ge[i], &out66[33*i], &size, 1); + /* serialize must succeed because the point was just loaded */ + VERIFY_CHECK(ret); + } + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_pubnonce_parse(const rustsecp256k1zkp_v0_4_0_context* ctx, 
rustsecp256k1zkp_v0_4_0_musig_pubnonce* nonce, const unsigned char *in66) { + rustsecp256k1zkp_v0_4_0_ge ge[2]; + int i; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(nonce != NULL); + ARG_CHECK(in66 != NULL); + + for (i = 0; i < 2; i++) { + if (!rustsecp256k1zkp_v0_4_0_eckey_pubkey_parse(&ge[i], &in66[33*i], 33)) { + return 0; + } + if (!rustsecp256k1zkp_v0_4_0_ge_is_in_correct_subgroup(&ge[i])) { + return 0; + } + } + /* The group elements can not be infinity because they were just parsed */ + rustsecp256k1zkp_v0_4_0_musig_pubnonce_save(nonce, ge); + rustsecp256k1zkp_v0_4_0_ge_clear(&ge[0]); + rustsecp256k1zkp_v0_4_0_ge_clear(&ge[1]); + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_aggnonce_serialize(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *out66, const rustsecp256k1zkp_v0_4_0_musig_aggnonce* nonce) { + return rustsecp256k1zkp_v0_4_0_musig_pubnonce_serialize(ctx, out66, (rustsecp256k1zkp_v0_4_0_musig_pubnonce*) nonce); +} + +int rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_aggnonce* nonce, const unsigned char *in66) { + return rustsecp256k1zkp_v0_4_0_musig_pubnonce_parse(ctx, (rustsecp256k1zkp_v0_4_0_musig_pubnonce*) nonce, in66); +} + +int rustsecp256k1zkp_v0_4_0_musig_partial_sig_serialize(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *out32, const rustsecp256k1zkp_v0_4_0_musig_partial_sig* sig) { + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(out32 != NULL); + ARG_CHECK(sig != NULL); + memcpy(out32, &sig->data[4], 32); + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_partial_sig_parse(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_partial_sig* sig, const unsigned char *in32) { + rustsecp256k1zkp_v0_4_0_scalar tmp; + int overflow; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(sig != NULL); + ARG_CHECK(in32 != NULL); + + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&tmp, in32, &overflow); + if (overflow) { + 
rustsecp256k1zkp_v0_4_0_scalar_clear(&tmp); + return 0; + } + rustsecp256k1zkp_v0_4_0_musig_partial_sig_save(sig, &tmp); + rustsecp256k1zkp_v0_4_0_scalar_clear(&tmp); + return 1; +} + +/* Normalizes the x-coordinate of the given group element. */ +static int rustsecp256k1zkp_v0_4_0_xonly_ge_serialize(unsigned char *output32, rustsecp256k1zkp_v0_4_0_ge *ge) { + if (rustsecp256k1zkp_v0_4_0_ge_is_infinity(ge)) { + return 0; + } + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&ge->x); + rustsecp256k1zkp_v0_4_0_fe_get_b32(output32, &ge->x); + return 1; +} + +static void rustsecp256k1zkp_v0_4_0_nonce_function_musig(rustsecp256k1zkp_v0_4_0_scalar *k, const unsigned char *session_id, const unsigned char *key32, const unsigned char *msg32, const unsigned char *agg_pk, const unsigned char *extra_input32) { + rustsecp256k1zkp_v0_4_0_sha256 sha; + unsigned char seed[32]; + unsigned char i; + enum { n_extra_in = 4 }; + const unsigned char *extra_in[n_extra_in]; + + /* TODO: this doesn't have the same sidechannel resistance as the BIP340 + * nonce function because the seckey feeds directly into SHA. 
*/ + rustsecp256k1zkp_v0_4_0_sha256_initialize_tagged(&sha, (unsigned char*)"MuSig/nonce", 11); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, session_id, 32); + extra_in[0] = key32; + extra_in[1] = agg_pk; + extra_in[2] = msg32; + extra_in[3] = extra_input32; + for (i = 0; i < n_extra_in; i++) { + unsigned char marker; + if (extra_in[i] != NULL) { + marker = 1; + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, &marker, 1); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, extra_in[i], 32); + } else { + marker = 0; + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, &marker, 1); + } + } + rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, seed); + + for (i = 0; i < 2; i++) { + unsigned char buf[32]; + rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, seed, 32); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, &i, 1); + rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, buf); + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&k[i], buf, NULL); + } +} + +int rustsecp256k1zkp_v0_4_0_musig_nonce_gen(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_secnonce *secnonce, rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce, const unsigned char *session_id32, const unsigned char *seckey, const unsigned char *msg32, const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, const unsigned char *extra_input32) { + rustsecp256k1zkp_v0_4_0_keyagg_cache_internal cache_i; + rustsecp256k1zkp_v0_4_0_scalar k[2]; + rustsecp256k1zkp_v0_4_0_ge nonce_pt[2]; + int i; + unsigned char pk_ser[32]; + unsigned char *pk_ser_ptr = NULL; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(secnonce != NULL); + memset(secnonce, 0, sizeof(*secnonce)); + ARG_CHECK(pubnonce != NULL); + memset(pubnonce, 0, sizeof(*pubnonce)); + ARG_CHECK(session_id32 != NULL); + ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); + + /* Check that the seckey is valid to be able to sign for it later. 
*/ + if (seckey != NULL) { + rustsecp256k1zkp_v0_4_0_scalar sk; + int ret; + ret = rustsecp256k1zkp_v0_4_0_scalar_set_b32_seckey(&sk, seckey); + /* The declassified return value indicates the validity of the seckey. + * If this function is called correctly it is always 1. (Note: + * declassify was only required for valgrind_ctime_test build with + * USE_ASM_X86_64=no. */ + rustsecp256k1zkp_v0_4_0_declassify(ctx, &ret, sizeof(ret)); + ARG_CHECK(ret); + rustsecp256k1zkp_v0_4_0_scalar_clear(&sk); + } + + if (keyagg_cache != NULL) { + int ret; + if (!rustsecp256k1zkp_v0_4_0_keyagg_cache_load(ctx, &cache_i, keyagg_cache)) { + return 0; + } + ret = rustsecp256k1zkp_v0_4_0_xonly_ge_serialize(pk_ser, &cache_i.pk); + /* Serialization can not fail because the loaded point can not be infinity. */ + VERIFY_CHECK(ret); + pk_ser_ptr = pk_ser; + } + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k, session_id32, seckey, msg32, pk_ser_ptr, extra_input32); + VERIFY_CHECK(!rustsecp256k1zkp_v0_4_0_scalar_is_zero(&k[0])); + VERIFY_CHECK(!rustsecp256k1zkp_v0_4_0_scalar_is_zero(&k[1])); + rustsecp256k1zkp_v0_4_0_musig_secnonce_save(secnonce, k); + + for (i = 0; i < 2; i++) { + rustsecp256k1zkp_v0_4_0_gej nonce_ptj; + rustsecp256k1zkp_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &nonce_ptj, &k[i]); + rustsecp256k1zkp_v0_4_0_ge_set_gej(&nonce_pt[i], &nonce_ptj); + rustsecp256k1zkp_v0_4_0_declassify(ctx, &nonce_pt[i], sizeof(nonce_pt)); + rustsecp256k1zkp_v0_4_0_scalar_clear(&k[i]); + } + /* nonce_pt can't be infinity because k != 0 */ + rustsecp256k1zkp_v0_4_0_musig_pubnonce_save(pubnonce, nonce_pt); + return 1; +} + +static int rustsecp256k1zkp_v0_4_0_musig_sum_nonces(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_gej *summed_nonces, const rustsecp256k1zkp_v0_4_0_musig_pubnonce * const* pubnonces, size_t n_pubnonces) { + size_t i; + int j; + + rustsecp256k1zkp_v0_4_0_gej_set_infinity(&summed_nonces[0]); + rustsecp256k1zkp_v0_4_0_gej_set_infinity(&summed_nonces[1]); + + for 
(i = 0; i < n_pubnonces; i++) { + rustsecp256k1zkp_v0_4_0_ge nonce_pt[2]; + if (!rustsecp256k1zkp_v0_4_0_musig_pubnonce_load(ctx, nonce_pt, pubnonces[i])) { + return 0; + } + for (j = 0; j < 2; j++) { + rustsecp256k1zkp_v0_4_0_gej_add_ge_var(&summed_nonces[j], &summed_nonces[j], &nonce_pt[j], NULL); + } + } + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_nonce_agg(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_aggnonce *aggnonce, const rustsecp256k1zkp_v0_4_0_musig_pubnonce * const* pubnonces, size_t n_pubnonces) { + rustsecp256k1zkp_v0_4_0_gej aggnonce_ptj[2]; + rustsecp256k1zkp_v0_4_0_ge aggnonce_pt[2]; + int i; + + ARG_CHECK(aggnonce != NULL); + ARG_CHECK(pubnonces != NULL); + ARG_CHECK(n_pubnonces > 0); + + if (!rustsecp256k1zkp_v0_4_0_musig_sum_nonces(ctx, aggnonce_ptj, pubnonces, n_pubnonces)) { + return 0; + } + for (i = 0; i < 2; i++) { + if (rustsecp256k1zkp_v0_4_0_gej_is_infinity(&aggnonce_ptj[i])) { + return 0; + } + rustsecp256k1zkp_v0_4_0_ge_set_gej(&aggnonce_pt[i], &aggnonce_ptj[i]); + } + rustsecp256k1zkp_v0_4_0_musig_pubnonce_save((rustsecp256k1zkp_v0_4_0_musig_pubnonce*)aggnonce, aggnonce_pt); + return 1; +} + +/* hash(aggnonce[0], aggnonce[1], agg_pk, msg) */ +static int rustsecp256k1zkp_v0_4_0_musig_compute_noncehash(unsigned char *noncehash, rustsecp256k1zkp_v0_4_0_ge *aggnonce, const unsigned char *agg_pk32, const unsigned char *msg) { + unsigned char buf[33]; + rustsecp256k1zkp_v0_4_0_sha256 sha; + int i; + + rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); + for (i = 0; i < 2; i++) { + size_t size = sizeof(buf); + if (!rustsecp256k1zkp_v0_4_0_eckey_pubkey_serialize(&aggnonce[i], buf, &size, 1)) { + return 0; + } + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, buf, sizeof(buf)); + } + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, agg_pk32, 32); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, msg, 32); + rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, noncehash); + return 1; +} + +static int 
rustsecp256k1zkp_v0_4_0_musig_nonce_process_internal(const rustsecp256k1zkp_v0_4_0_ecmult_context* ecmult_ctx, int *fin_nonce_parity, unsigned char *fin_nonce, rustsecp256k1zkp_v0_4_0_scalar *b, rustsecp256k1zkp_v0_4_0_gej *aggnoncej, const unsigned char *agg_pk32, const unsigned char *msg) { + unsigned char noncehash[32]; + rustsecp256k1zkp_v0_4_0_ge fin_nonce_pt; + rustsecp256k1zkp_v0_4_0_gej fin_nonce_ptj; + rustsecp256k1zkp_v0_4_0_ge aggnonce[2]; + + rustsecp256k1zkp_v0_4_0_ge_set_gej(&aggnonce[0], &aggnoncej[0]); + rustsecp256k1zkp_v0_4_0_ge_set_gej(&aggnonce[1], &aggnoncej[1]); + if (!rustsecp256k1zkp_v0_4_0_musig_compute_noncehash(noncehash, aggnonce, agg_pk32, msg)) { + return 0; + } + /* aggnonce = aggnonces[0] + b*aggnonces[1] */ + rustsecp256k1zkp_v0_4_0_scalar_set_b32(b, noncehash, NULL); + rustsecp256k1zkp_v0_4_0_ecmult(ecmult_ctx, &fin_nonce_ptj, &aggnoncej[1], b, NULL); + rustsecp256k1zkp_v0_4_0_gej_add_ge(&fin_nonce_ptj, &fin_nonce_ptj, &aggnonce[0]); + rustsecp256k1zkp_v0_4_0_ge_set_gej(&fin_nonce_pt, &fin_nonce_ptj); + if (!rustsecp256k1zkp_v0_4_0_xonly_ge_serialize(fin_nonce, &fin_nonce_pt)) { + /* unreachable with overwhelming probability */ + return 0; + } + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&fin_nonce_pt.y); + *fin_nonce_parity = rustsecp256k1zkp_v0_4_0_fe_is_odd(&fin_nonce_pt.y); + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_nonce_process(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_session *session, const rustsecp256k1zkp_v0_4_0_musig_aggnonce *aggnonce, const unsigned char *msg32, const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, const rustsecp256k1zkp_v0_4_0_pubkey *adaptor) { + rustsecp256k1zkp_v0_4_0_keyagg_cache_internal cache_i; + rustsecp256k1zkp_v0_4_0_ge aggnonce_pt[2]; + rustsecp256k1zkp_v0_4_0_gej aggnonce_ptj[2]; + unsigned char fin_nonce[32]; + rustsecp256k1zkp_v0_4_0_musig_session_internal session_i; + unsigned char agg_pk32[32]; + + VERIFY_CHECK(ctx != NULL); + 
ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(session != NULL); + ARG_CHECK(aggnonce != NULL); + ARG_CHECK(msg32 != NULL); + ARG_CHECK(keyagg_cache != NULL); + + if (!rustsecp256k1zkp_v0_4_0_keyagg_cache_load(ctx, &cache_i, keyagg_cache)) { + return 0; + } + rustsecp256k1zkp_v0_4_0_fe_get_b32(agg_pk32, &cache_i.pk.x); + + if (!rustsecp256k1zkp_v0_4_0_musig_pubnonce_load(ctx, aggnonce_pt, (rustsecp256k1zkp_v0_4_0_musig_pubnonce*)aggnonce)) { + return 0; + } + rustsecp256k1zkp_v0_4_0_gej_set_ge(&aggnonce_ptj[0], &aggnonce_pt[0]); + rustsecp256k1zkp_v0_4_0_gej_set_ge(&aggnonce_ptj[1], &aggnonce_pt[1]); + /* Add public adaptor to nonce */ + if (adaptor != NULL) { + rustsecp256k1zkp_v0_4_0_ge adaptorp; + if (!rustsecp256k1zkp_v0_4_0_pubkey_load(ctx, &adaptorp, adaptor)) { + return 0; + } + rustsecp256k1zkp_v0_4_0_gej_add_ge_var(&aggnonce_ptj[0], &aggnonce_ptj[0], &adaptorp, NULL); + } + if (!rustsecp256k1zkp_v0_4_0_musig_nonce_process_internal(&ctx->ecmult_ctx, &session_i.fin_nonce_parity, fin_nonce, &session_i.noncecoef, aggnonce_ptj, agg_pk32, msg32)) { + return 0; + } + + /* Compute messagehash and store in session cache */ + rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(&session_i.challenge, fin_nonce, msg32, 32, agg_pk32); + + /* If there is a tweak then set `msghash` times `tweak` to the `s`-part.*/ + rustsecp256k1zkp_v0_4_0_scalar_clear(&session_i.s_part); + if (cache_i.is_tweaked) { + rustsecp256k1zkp_v0_4_0_scalar e_tmp = session_i.challenge; + if (!rustsecp256k1zkp_v0_4_0_eckey_privkey_tweak_mul(&e_tmp, &cache_i.tweak)) { + /* This mimics the behavior of rustsecp256k1zkp_v0_4_0_ec_seckey_tweak_mul regarding + * tweak being 0. 
*/ + return 0; + } + if (rustsecp256k1zkp_v0_4_0_fe_is_odd(&cache_i.pk.y)) { + rustsecp256k1zkp_v0_4_0_scalar_negate(&e_tmp, &e_tmp); + } + rustsecp256k1zkp_v0_4_0_scalar_add(&session_i.s_part, &session_i.s_part, &e_tmp); + } + session_i.fin_nonce = fin_nonce; + rustsecp256k1zkp_v0_4_0_musig_session_save(session, &session_i); + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_partial_sign(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sig, rustsecp256k1zkp_v0_4_0_musig_secnonce *secnonce, const rustsecp256k1zkp_v0_4_0_keypair *keypair, const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, const rustsecp256k1zkp_v0_4_0_musig_session *session) { + rustsecp256k1zkp_v0_4_0_scalar sk; + rustsecp256k1zkp_v0_4_0_ge pk; + rustsecp256k1zkp_v0_4_0_scalar k[2]; + rustsecp256k1zkp_v0_4_0_scalar mu, s; + rustsecp256k1zkp_v0_4_0_keyagg_cache_internal cache_i; + rustsecp256k1zkp_v0_4_0_musig_session_internal session_i; + int ret; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(partial_sig != NULL); + ARG_CHECK(secnonce != NULL); + ARG_CHECK(keypair != NULL); + ARG_CHECK(keyagg_cache != NULL); + ARG_CHECK(session != NULL); + + /* Fails if the magic doesn't match */ + ret = rustsecp256k1zkp_v0_4_0_musig_secnonce_load(ctx, k, secnonce); + /* Set nonce to zero to avoid nonce reuse. This will cause subsequent calls + * of this function to fail */ + memset(secnonce, 0, sizeof(*secnonce)); + if (!ret) { + return 0; + } + + /* Obtain the signer's public key point and determine if the sk is + * negated before signing. That happens if if the signer's pubkey has an odd + * Y coordinate XOR the MuSig-aggregate pubkey has an odd Y coordinate XOR + * (if tweaked) the internal key has an odd Y coordinate. + * + * This can be seen by looking at the sk key belonging to `agg_pk`. + * Let's define + * P' := mu_0*|P_0| + ... 
+ mu_n*|P_n| where P_i is the i-th public key + * point x_i*G, mu_i is the i-th KeyAgg coefficient and |.| is a function + * that normalizes a point to an even Y by negating if necessary similar to + * rustsecp256k1zkp_v0_4_0_extrakeys_ge_even_y. Then we have + * P := |P'| + t*G where t is the tweak. + * And the aggregate xonly public key is + * |P| = x*G + * where x = sum_i(b_i*mu_i*x_i) + b'*t + * b' = -1 if P != |P|, 1 otherwise + * b_i = -1 if (P_i != |P_i| XOR P' != |P'| XOR P != |P|) and 1 + * otherwise. + */ + if (!rustsecp256k1zkp_v0_4_0_keypair_load(ctx, &sk, &pk, keypair)) { + return 0; + } + if (!rustsecp256k1zkp_v0_4_0_keyagg_cache_load(ctx, &cache_i, keyagg_cache)) { + return 0; + } + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&pk.y); + if((rustsecp256k1zkp_v0_4_0_fe_is_odd(&pk.y) + + rustsecp256k1zkp_v0_4_0_fe_is_odd(&cache_i.pk.y) + + (cache_i.is_tweaked + && cache_i.internal_key_parity)) + % 2 == 1) { + rustsecp256k1zkp_v0_4_0_scalar_negate(&sk, &sk); + } + + /* Multiply KeyAgg coefficient */ + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&pk.x); + rustsecp256k1zkp_v0_4_0_musig_keyaggcoef(&mu, &cache_i, &pk.x); + rustsecp256k1zkp_v0_4_0_scalar_mul(&sk, &sk, &mu); + + if (!rustsecp256k1zkp_v0_4_0_musig_session_load(ctx, &session_i, session)) { + return 0; + } + if (session_i.fin_nonce_parity) { + rustsecp256k1zkp_v0_4_0_scalar_negate(&k[0], &k[0]); + rustsecp256k1zkp_v0_4_0_scalar_negate(&k[1], &k[1]); + } + + /* Sign */ + rustsecp256k1zkp_v0_4_0_scalar_mul(&s, &session_i.challenge, &sk); + rustsecp256k1zkp_v0_4_0_scalar_mul(&k[1], &session_i.noncecoef, &k[1]); + rustsecp256k1zkp_v0_4_0_scalar_add(&k[0], &k[0], &k[1]); + rustsecp256k1zkp_v0_4_0_scalar_add(&s, &s, &k[0]); + rustsecp256k1zkp_v0_4_0_musig_partial_sig_save(partial_sig, &s); + rustsecp256k1zkp_v0_4_0_scalar_clear(&sk); + rustsecp256k1zkp_v0_4_0_scalar_clear(&k[0]); + rustsecp256k1zkp_v0_4_0_scalar_clear(&k[1]); + return 1; +} + +int rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(const 
rustsecp256k1zkp_v0_4_0_context* ctx, const rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sig, const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkey, const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, const rustsecp256k1zkp_v0_4_0_musig_session *session) { + rustsecp256k1zkp_v0_4_0_keyagg_cache_internal cache_i; + rustsecp256k1zkp_v0_4_0_musig_session_internal session_i; + rustsecp256k1zkp_v0_4_0_scalar mu, e, s; + rustsecp256k1zkp_v0_4_0_gej pkj; + rustsecp256k1zkp_v0_4_0_ge nonce_pt[2]; + rustsecp256k1zkp_v0_4_0_gej rj; + rustsecp256k1zkp_v0_4_0_gej tmp; + rustsecp256k1zkp_v0_4_0_ge pkp; + int i; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); + ARG_CHECK(partial_sig != NULL); + ARG_CHECK(pubnonce != NULL); + ARG_CHECK(pubkey != NULL); + ARG_CHECK(keyagg_cache != NULL); + ARG_CHECK(session != NULL); + + if (!rustsecp256k1zkp_v0_4_0_musig_session_load(ctx, &session_i, session)) { + return 0; + } + + /* Compute "effective" nonce rj = aggnonce[0] + b*aggnonce[1] */ + /* TODO: use multiexp */ + for (i = 0; i < 2; i++) { + if (!rustsecp256k1zkp_v0_4_0_musig_pubnonce_load(ctx, nonce_pt, pubnonce)) { + return 0; + } + } + rustsecp256k1zkp_v0_4_0_gej_set_ge(&rj, &nonce_pt[1]); + rustsecp256k1zkp_v0_4_0_ecmult(&ctx->ecmult_ctx, &rj, &rj, &session_i.noncecoef, NULL); + rustsecp256k1zkp_v0_4_0_gej_add_ge_var(&rj, &rj, &nonce_pt[0], NULL); + + if (!rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx, &pkp, pubkey)) { + return 0; + } + if (!rustsecp256k1zkp_v0_4_0_keyagg_cache_load(ctx, &cache_i, keyagg_cache)) { + return 0; + } + /* Multiplying the messagehash by the KeyAgg coefficient is equivalent + * to multiplying the signer's public key by the coefficient, except + * much easier to do. 
*/ + rustsecp256k1zkp_v0_4_0_musig_keyaggcoef(&mu, &cache_i, &pkp.x); + rustsecp256k1zkp_v0_4_0_scalar_mul(&e, &session_i.challenge, &mu); + + /* If the MuSig-aggregate point has an odd Y coordinate, the signers will + * sign for the negation of their individual xonly public key such that the + * aggregate signature is valid for the MuSig aggregated xonly key. If the + * MuSig-aggregate point was tweaked then `e` is negated if the aggregate key + * has an odd Y coordinate XOR the internal key has an odd Y coordinate.*/ + if (rustsecp256k1zkp_v0_4_0_fe_is_odd(&cache_i.pk.y) + != (cache_i.is_tweaked + && cache_i.internal_key_parity)) { + rustsecp256k1zkp_v0_4_0_scalar_negate(&e, &e); + } + + if (!rustsecp256k1zkp_v0_4_0_musig_partial_sig_load(ctx, &s, partial_sig)) { + return 0; + } + /* Compute -s*G + e*pkj + rj */ + rustsecp256k1zkp_v0_4_0_scalar_negate(&s, &s); + rustsecp256k1zkp_v0_4_0_gej_set_ge(&pkj, &pkp); + rustsecp256k1zkp_v0_4_0_ecmult(&ctx->ecmult_ctx, &tmp, &pkj, &e, &s); + if (session_i.fin_nonce_parity) { + rustsecp256k1zkp_v0_4_0_gej_neg(&rj, &rj); + } + rustsecp256k1zkp_v0_4_0_gej_add_var(&tmp, &tmp, &rj, NULL); + + return rustsecp256k1zkp_v0_4_0_gej_is_infinity(&tmp); +} + +int rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *sig64, const rustsecp256k1zkp_v0_4_0_musig_session *session, const rustsecp256k1zkp_v0_4_0_musig_partial_sig * const* partial_sigs, size_t n_sigs) { + size_t i; + rustsecp256k1zkp_v0_4_0_musig_session_internal session_i; + + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(sig64 != NULL); + ARG_CHECK(session != NULL); + ARG_CHECK(partial_sigs != NULL); + + if (!rustsecp256k1zkp_v0_4_0_musig_session_load(ctx, &session_i, session)) { + return 0; + } + for (i = 0; i < n_sigs; i++) { + rustsecp256k1zkp_v0_4_0_scalar term; + if (!rustsecp256k1zkp_v0_4_0_musig_partial_sig_load(ctx, &term, partial_sigs[i])) { + return 0; + } + rustsecp256k1zkp_v0_4_0_scalar_add(&session_i.s_part, 
&session_i.s_part, &term); + } + rustsecp256k1zkp_v0_4_0_scalar_get_b32(&sig64[32], &session_i.s_part); + memcpy(&sig64[0], session_i.fin_nonce, 32); + return 1; +} + +#endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/tests_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/tests_impl.h index f2164aff..72c3cc40 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/tests_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/musig/tests_impl.h @@ -9,100 +9,131 @@ #include "secp256k1_musig.h" -int rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk, const unsigned char *seckey) { +static int create_keypair_and_pk(rustsecp256k1zkp_v0_4_0_keypair *keypair, rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk, const unsigned char *sk) { int ret; - rustsecp256k1zkp_v0_4_0_keypair keypair; - ret = rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &keypair, seckey); - ret &= rustsecp256k1zkp_v0_4_0_keypair_xonly_pub(ctx, pk, NULL, &keypair); + rustsecp256k1zkp_v0_4_0_keypair keypair_tmp; + ret = rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &keypair_tmp, sk); + ret &= rustsecp256k1zkp_v0_4_0_keypair_xonly_pub(ctx, pk, NULL, &keypair_tmp); + if (keypair != NULL) { + *keypair = keypair_tmp; + } return ret; } -/* Just a simple (non-adaptor, non-tweaked) 2-of-2 MuSig combine, sign, verify +/* Just a simple (non-adaptor, non-tweaked) 2-of-2 MuSig aggregate, sign, verify * test. 
*/ void musig_simple_test(rustsecp256k1zkp_v0_4_0_scratch_space *scratch) { unsigned char sk[2][32]; - rustsecp256k1zkp_v0_4_0_musig_session session[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signer0[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signer1[2]; - unsigned char nonce_commitment[2][32]; + rustsecp256k1zkp_v0_4_0_keypair keypair[2]; + rustsecp256k1zkp_v0_4_0_musig_pubnonce pubnonce[2]; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce_ptr[2]; + rustsecp256k1zkp_v0_4_0_musig_aggnonce aggnonce; unsigned char msg[32]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey combined_pk; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session; + rustsecp256k1zkp_v0_4_0_xonly_pubkey agg_pk; + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache keyagg_cache; unsigned char session_id[2][32]; + rustsecp256k1zkp_v0_4_0_musig_secnonce secnonce[2]; rustsecp256k1zkp_v0_4_0_xonly_pubkey pk[2]; - const unsigned char *ncs[2]; - unsigned char public_nonce[3][32]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig[2]; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk_ptr[2]; + rustsecp256k1zkp_v0_4_0_musig_partial_sig partial_sig[2]; + const rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sig_ptr[2]; unsigned char final_sig[64]; + rustsecp256k1zkp_v0_4_0_musig_session session; + int i; - rustsecp256k1zkp_v0_4_0_testrand256(session_id[0]); - rustsecp256k1zkp_v0_4_0_testrand256(session_id[1]); - rustsecp256k1zkp_v0_4_0_testrand256(sk[0]); - rustsecp256k1zkp_v0_4_0_testrand256(sk[1]); rustsecp256k1zkp_v0_4_0_testrand256(msg); + for (i = 0; i < 2; i++) { + rustsecp256k1zkp_v0_4_0_testrand256(session_id[i]); + rustsecp256k1zkp_v0_4_0_testrand256(sk[i]); + pk_ptr[i] = &pk[i]; + pubnonce_ptr[i] = &pubnonce[i]; + partial_sig_ptr[i] = &partial_sig[i]; + CHECK(create_keypair_and_pk(&keypair[i], &pk[i], sk[i])); + } + + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(ctx, scratch, &agg_pk, &keyagg_cache, pk_ptr, 2) == 1); + + 
CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &secnonce[0], &pubnonce[0], session_id[0], sk[0], NULL, NULL, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &secnonce[1], &pubnonce[1], session_id[1], sk[1], NULL, NULL, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[0], sk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[1], sk[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(ctx, scratch, &combined_pk, &pre_session, pk, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session[1], signer1, nonce_commitment[1], session_id[1], msg, &combined_pk, &pre_session, 2, 1, sk[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(ctx, &aggnonce, pubnonce_ptr, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(ctx, &session, &aggnonce, msg, &keyagg_cache, NULL) == 1); - ncs[0] = nonce_commitment[0]; - ncs[1] = nonce_commitment[1]; + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig[0], &secnonce[0], &keypair[0], &keyagg_cache, &session) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &partial_sig[0], &pubnonce[0], &pk[0], &keyagg_cache, &session) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig[1], &secnonce[1], &keypair[1], &keyagg_cache, &session) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &partial_sig[1], &pubnonce[1], &pk[1], &keyagg_cache, &session) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session[0], signer0, public_nonce[0], ncs, 2, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session[1], signer1, public_nonce[1], ncs, 2, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(ctx, final_sig, &session, partial_sig_ptr, 2) == 
1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, final_sig, msg, sizeof(msg), &agg_pk) == 1); +} - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signer0[0], public_nonce[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signer0[1], public_nonce[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signer1[0], public_nonce[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signer1[1], public_nonce[1]) == 1); +void pubnonce_summing_to_inf(rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce) { + rustsecp256k1zkp_v0_4_0_ge ge[2]; + int i; + rustsecp256k1zkp_v0_4_0_gej summed_nonces[2]; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce_ptr[2]; - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session[0], signer0, 2, NULL, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session[1], signer1, 2, NULL, NULL) == 1); + ge[0] = rustsecp256k1zkp_v0_4_0_ge_const_g; + ge[1] = rustsecp256k1zkp_v0_4_0_ge_const_g; - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &session[0], &partial_sig[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &session[0], &signer0[0], &partial_sig[0], &pk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &session[1], &partial_sig[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &session[0], &signer0[1], &partial_sig[1], &pk[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &session[1], &signer1[1], &partial_sig[1], &pk[1]) == 1); + for (i = 0; i < 2; i++) { + rustsecp256k1zkp_v0_4_0_musig_pubnonce_save(&pubnonce[i], ge); + pubnonce_ptr[i] = &pubnonce[i]; + rustsecp256k1zkp_v0_4_0_ge_neg(&ge[0], &ge[0]); + rustsecp256k1zkp_v0_4_0_ge_neg(&ge[1], &ge[1]); + } - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(ctx, &session[0], final_sig, partial_sig, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, final_sig, msg, &combined_pk) == 1); + 
rustsecp256k1zkp_v0_4_0_musig_sum_nonces(ctx, summed_nonces, pubnonce_ptr, 2); + rustsecp256k1zkp_v0_4_0_gej_is_infinity(&summed_nonces[0]); + rustsecp256k1zkp_v0_4_0_gej_is_infinity(&summed_nonces[1]); } void musig_api_tests(rustsecp256k1zkp_v0_4_0_scratch_space *scratch) { rustsecp256k1zkp_v0_4_0_scratch_space *scratch_small; - rustsecp256k1zkp_v0_4_0_musig_session session[2]; - rustsecp256k1zkp_v0_4_0_musig_session session_uninitialized; - rustsecp256k1zkp_v0_4_0_musig_session verifier_session; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signer0[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signer1[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data verifier_signer_data[2]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig[2]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig_adapted[2]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig_overflow; + rustsecp256k1zkp_v0_4_0_musig_partial_sig partial_sig[2]; + const rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sig_ptr[2]; + rustsecp256k1zkp_v0_4_0_musig_partial_sig invalid_partial_sig; + const rustsecp256k1zkp_v0_4_0_musig_partial_sig *invalid_partial_sig_ptr[2]; unsigned char final_sig[64]; - unsigned char final_sig_cmp[64]; - + unsigned char pre_sig[64]; unsigned char buf[32]; unsigned char sk[2][32]; - unsigned char ones[32]; + rustsecp256k1zkp_v0_4_0_keypair keypair[2]; + rustsecp256k1zkp_v0_4_0_keypair invalid_keypair; + unsigned char max64[64]; + unsigned char zeros68[68] = { 0 }; unsigned char session_id[2][32]; - unsigned char nonce_commitment[2][32]; - int combined_nonce_parity; - const unsigned char *ncs[2]; + rustsecp256k1zkp_v0_4_0_musig_secnonce secnonce[2]; + rustsecp256k1zkp_v0_4_0_musig_secnonce secnonce_tmp; + rustsecp256k1zkp_v0_4_0_musig_secnonce invalid_secnonce; + rustsecp256k1zkp_v0_4_0_musig_pubnonce pubnonce[2]; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce_ptr[2]; + unsigned char pubnonce_ser[66]; + 
rustsecp256k1zkp_v0_4_0_musig_pubnonce inf_pubnonce[2]; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *inf_pubnonce_ptr[2]; + rustsecp256k1zkp_v0_4_0_musig_pubnonce invalid_pubnonce; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *invalid_pubnonce_ptr[1]; + rustsecp256k1zkp_v0_4_0_musig_aggnonce aggnonce; + unsigned char aggnonce_ser[66]; unsigned char msg[32]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey combined_pk; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session_uninitialized; + rustsecp256k1zkp_v0_4_0_xonly_pubkey agg_pk; + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache keyagg_cache; + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache invalid_keyagg_cache; + rustsecp256k1zkp_v0_4_0_musig_session session; + rustsecp256k1zkp_v0_4_0_musig_session invalid_session; rustsecp256k1zkp_v0_4_0_xonly_pubkey pk[2]; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk_ptr[2]; + rustsecp256k1zkp_v0_4_0_xonly_pubkey invalid_pk; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *invalid_pk_ptr2[2]; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *invalid_pk_ptr3[3]; unsigned char tweak[32]; - + int nonce_parity; unsigned char sec_adaptor[32]; unsigned char sec_adaptor1[32]; rustsecp256k1zkp_v0_4_0_pubkey adaptor; + int i; /** setup **/ rustsecp256k1zkp_v0_4_0_context *none = rustsecp256k1zkp_v0_4_0_context_create(SECP256K1_CONTEXT_NONE); @@ -117,606 +148,460 @@ void musig_api_tests(rustsecp256k1zkp_v0_4_0_scratch_space *scratch) { rustsecp256k1zkp_v0_4_0_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount); rustsecp256k1zkp_v0_4_0_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount); - memset(ones, 0xff, 32); + memset(max64, 0xff, sizeof(max64)); + memset(&invalid_keypair, 0, sizeof(invalid_keypair)); + memset(&invalid_pk, 0, sizeof(invalid_pk)); + memset(&invalid_secnonce, 0, sizeof(invalid_secnonce)); + memset(&invalid_partial_sig, 0, sizeof(invalid_partial_sig)); + 
pubnonce_summing_to_inf(inf_pubnonce); /* Simulate structs being uninitialized by setting it to 0s. We don't want * to produce undefined behavior by actually providing uninitialized * structs. */ - memset(&pre_session_uninitialized, 0, sizeof(pre_session_uninitialized)); - memset(&session_uninitialized, 0, sizeof(session_uninitialized)); + memset(&invalid_keyagg_cache, 0, sizeof(invalid_keyagg_cache)); + memset(&invalid_pk, 0, sizeof(invalid_pk)); + memset(&invalid_pubnonce, 0, sizeof(invalid_pubnonce)); + memset(&invalid_session, 0, sizeof(invalid_session)); - rustsecp256k1zkp_v0_4_0_testrand256(session_id[0]); - rustsecp256k1zkp_v0_4_0_testrand256(session_id[1]); - rustsecp256k1zkp_v0_4_0_testrand256(sk[0]); - rustsecp256k1zkp_v0_4_0_testrand256(sk[1]); - rustsecp256k1zkp_v0_4_0_testrand256(msg); rustsecp256k1zkp_v0_4_0_testrand256(sec_adaptor); + rustsecp256k1zkp_v0_4_0_testrand256(msg); rustsecp256k1zkp_v0_4_0_testrand256(tweak); - - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[0], sk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[1], sk[1]) == 1); CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_create(ctx, &adaptor, sec_adaptor) == 1); - + for (i = 0; i < 2; i++) { + pk_ptr[i] = &pk[i]; + invalid_pk_ptr2[i] = &invalid_pk; + invalid_pk_ptr3[i] = &pk[i]; + pubnonce_ptr[i] = &pubnonce[i]; + inf_pubnonce_ptr[i] = &inf_pubnonce[i]; + partial_sig_ptr[i] = &partial_sig[i]; + invalid_partial_sig_ptr[i] = &partial_sig[i]; + rustsecp256k1zkp_v0_4_0_testrand256(session_id[i]); + rustsecp256k1zkp_v0_4_0_testrand256(sk[i]); + CHECK(create_keypair_and_pk(&keypair[i], &pk[i], sk[i])); + } + invalid_pubnonce_ptr[0] = &invalid_pubnonce; + invalid_partial_sig_ptr[0] = &invalid_partial_sig; + /* invalid_pk_ptr3 has two valid, one invalid pk, which is important to test + * musig_pubkey_agg */ + invalid_pk_ptr3[2] = &invalid_pk; /** main test body **/ - /* Key combination */ + /* Key aggregation */ ecount = 0; - 
CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(none, scratch, &combined_pk, &pre_session, pk, 2) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(none, scratch, &agg_pk, &keyagg_cache, pk_ptr, 2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(sign, scratch, &combined_pk, &pre_session, pk, 2) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(sign, scratch, &agg_pk, &keyagg_cache, pk_ptr, 2) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch, &combined_pk, &pre_session, pk, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, &keyagg_cache, pk_ptr, 2) == 1); CHECK(ecount == 2); - /* pubkey_combine does not require a scratch space */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, NULL, &combined_pk, &pre_session, pk, 2) == 1); + /* pubkey_agg does not require a scratch space */ + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, NULL, &agg_pk, &keyagg_cache, pk_ptr, 2) == 1); CHECK(ecount == 2); /* A small scratch space works too, but will result in using an ineffecient algorithm */ scratch_small = rustsecp256k1zkp_v0_4_0_scratch_space_create(ctx, 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch_small, &combined_pk, &pre_session, pk, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch_small, &agg_pk, &keyagg_cache, pk_ptr, 2) == 1); rustsecp256k1zkp_v0_4_0_scratch_space_destroy(ctx, scratch_small); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch, NULL, &pre_session, pk, 2) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch, &combined_pk, NULL, pk, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, NULL, &keyagg_cache, pk_ptr, 2) == 1); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, NULL, pk_ptr, 2) == 1); + CHECK(ecount == 2); + 
CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, &keyagg_cache, NULL, 2) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch, &combined_pk, &pre_session, NULL, 2) == 0); + CHECK(memcmp(&agg_pk, zeros68, sizeof(agg_pk)) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, &keyagg_cache, invalid_pk_ptr2, 2) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch, &combined_pk, &pre_session, pk, 0) == 0); + CHECK(memcmp(&agg_pk, zeros68, sizeof(agg_pk)) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, &keyagg_cache, invalid_pk_ptr3, 3) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch, &combined_pk, &pre_session, NULL, 0) == 0); + CHECK(memcmp(&agg_pk, zeros68, sizeof(agg_pk)) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, &keyagg_cache, pk_ptr, 0) == 0); CHECK(ecount == 6); + CHECK(memcmp(&agg_pk, zeros68, sizeof(agg_pk)) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, &keyagg_cache, NULL, 0) == 0); + CHECK(ecount == 7); + CHECK(memcmp(&agg_pk, zeros68, sizeof(agg_pk)) == 0); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch, &combined_pk, &pre_session, pk, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch, &combined_pk, &pre_session, pk, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(vrfy, scratch, &combined_pk, &pre_session, pk, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, &keyagg_cache, pk_ptr, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, &keyagg_cache, pk_ptr, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(vrfy, scratch, &agg_pk, &keyagg_cache, pk_ptr, 2) == 1); /** Tweaking */ ecount = 0; { - rustsecp256k1zkp_v0_4_0_xonly_pubkey tmp_internal_pk = 
combined_pk; rustsecp256k1zkp_v0_4_0_pubkey tmp_output_pk; - rustsecp256k1zkp_v0_4_0_musig_pre_session tmp_pre_session = pre_session; - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(ctx, &tmp_pre_session, &tmp_output_pk, &tmp_internal_pk, tweak) == 1); - /* Reset pre_session */ - tmp_pre_session = pre_session; - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(none, &tmp_pre_session, &tmp_output_pk, &tmp_internal_pk, tweak) == 0); + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache tmp_keyagg_cache = keyagg_cache; + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(ctx, &tmp_output_pk, tweak, &tmp_keyagg_cache) == 1); + /* Reset keyagg_cache */ + tmp_keyagg_cache = keyagg_cache; + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(none, &tmp_output_pk, tweak, &tmp_keyagg_cache) == 0); + CHECK(memcmp(&tmp_output_pk, zeros68, sizeof(tmp_output_pk)) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(sign, &tmp_pre_session, &tmp_output_pk, &tmp_internal_pk, tweak) == 0); + tmp_keyagg_cache = keyagg_cache; + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(sign, &tmp_output_pk, tweak, &tmp_keyagg_cache) == 0); + CHECK(memcmp(&tmp_output_pk, zeros68, sizeof(tmp_output_pk)) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_pre_session, &tmp_output_pk, &tmp_internal_pk, tweak) == 1); + tmp_keyagg_cache = keyagg_cache; + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_output_pk, tweak, &tmp_keyagg_cache) == 1); CHECK(ecount == 2); - tmp_pre_session = pre_session; - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, NULL, &tmp_output_pk, &tmp_internal_pk, tweak) == 0); + tmp_keyagg_cache = keyagg_cache; + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, NULL, tweak, &tmp_keyagg_cache) == 1); + CHECK(ecount == 2); + tmp_keyagg_cache = keyagg_cache; + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_output_pk, NULL, &tmp_keyagg_cache) == 0); + 
CHECK(ecount == 3); + CHECK(memcmp(&tmp_output_pk, zeros68, sizeof(tmp_output_pk)) == 0); + tmp_keyagg_cache = keyagg_cache; + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_output_pk, max64, &tmp_keyagg_cache) == 0); CHECK(ecount == 3); - /* Uninitialized pre_session */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &pre_session_uninitialized, &tmp_output_pk, &tmp_internal_pk, tweak) == 0); + CHECK(memcmp(&tmp_output_pk, zeros68, sizeof(tmp_output_pk)) == 0); + tmp_keyagg_cache = keyagg_cache; + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_output_pk, tweak, NULL) == 0); CHECK(ecount == 4); - /* Using the same pre_session twice does not work */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_pre_session, &tmp_output_pk, &tmp_internal_pk, tweak) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_pre_session, &tmp_output_pk, &tmp_internal_pk, tweak) == 0); + CHECK(memcmp(&tmp_output_pk, zeros68, sizeof(tmp_output_pk)) == 0); + tmp_keyagg_cache = keyagg_cache; + /* Uninitialized keyagg_cache */ + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_output_pk, tweak, &invalid_keyagg_cache) == 0); CHECK(ecount == 5); - tmp_pre_session = pre_session; - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_pre_session, NULL, &tmp_internal_pk, tweak) == 0); + CHECK(memcmp(&tmp_output_pk, zeros68, sizeof(tmp_output_pk)) == 0); + /* Using the same keyagg_cache twice does not work */ + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_output_pk, tweak, &tmp_keyagg_cache) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_output_pk, tweak, &tmp_keyagg_cache) == 0); CHECK(ecount == 6); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_pre_session, &tmp_output_pk, NULL, tweak) == 0); - CHECK(ecount == 7); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_pre_session, &tmp_output_pk, &tmp_internal_pk, 
NULL) == 0); - CHECK(ecount == 8); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(vrfy, &tmp_pre_session, &tmp_output_pk, &tmp_internal_pk, ones) == 0); - CHECK(ecount == 8); + CHECK(memcmp(&tmp_output_pk, zeros68, sizeof(tmp_output_pk)) == 0); } /** Session creation **/ ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(none, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(none, &secnonce[0], &pubnonce[0], session_id[0], sk[0], msg, &keyagg_cache, max64) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(vrfy, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(vrfy, &secnonce[0], &pubnonce[0], session_id[0], sk[0], msg, &keyagg_cache, max64) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[0], &pubnonce[0], session_id[0], sk[0], msg, &keyagg_cache, max64) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, NULL, signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, NULL, &pubnonce[0], session_id[0], sk[0], msg, &keyagg_cache, max64) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], NULL, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[0], NULL, session_id[0], sk[0], msg, &keyagg_cache, max64) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, NULL, session_id[0], 
msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[0], &pubnonce[0], NULL, sk[0], msg, &keyagg_cache, max64) == 0); + CHECK(ecount == 5); + CHECK(memcmp(&secnonce[0], zeros68, sizeof(secnonce[0])) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[0], &pubnonce[0], session_id[0], NULL, msg, &keyagg_cache, max64) == 1); CHECK(ecount == 5); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], NULL, msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[0], &pubnonce[0], session_id[0], sk[0], NULL, &keyagg_cache, max64) == 1); + CHECK(ecount == 5); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[0], &pubnonce[0], session_id[0], sk[0], msg, NULL, max64) == 1); + CHECK(ecount == 5); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[0], &pubnonce[0], session_id[0], sk[0], msg, &invalid_keyagg_cache, max64) == 0); CHECK(ecount == 6); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], NULL, &combined_pk, &pre_session, 2, 0, sk[0]) == 1); + CHECK(memcmp(&secnonce[0], zeros68, sizeof(secnonce[0])) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[0], &pubnonce[0], session_id[0], sk[0], msg, &keyagg_cache, NULL) == 1); CHECK(ecount == 6); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], msg, NULL, &pre_session, 2, 0, sk[0]) == 0); - CHECK(ecount == 7); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, NULL, 2, 0, sk[0]) == 0); - CHECK(ecount == 8); - /* Uninitialized pre_session */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, 
&pre_session_uninitialized, 2, 0, sk[0]) == 0); - CHECK(ecount == 9); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 0, 0, sk[0]) == 0); - CHECK(ecount == 10); - /* If more than UINT32_MAX fits in a size_t, test that session_init - * rejects n_signers that high. */ - if (SIZE_MAX > UINT32_MAX) { - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, ((size_t) UINT32_MAX) + 2, 0, sk[0]) == 0); - CHECK(ecount == 11); - } else { - ecount = 11; - } - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, NULL) == 0); - CHECK(ecount == 12); - /* secret key overflows */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, ones) == 0); - CHECK(ecount == 12); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[0], signer0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(sign, &session[1], signer1, nonce_commitment[1], session_id[1], msg, &combined_pk, &pre_session, 2, 1, sk[1]) == 1); - ncs[0] = nonce_commitment[0]; - ncs[1] = nonce_commitment[1]; + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[0], &pubnonce[0], session_id[0], sk[0], NULL, NULL, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(sign, &secnonce[1], &pubnonce[1], session_id[1], sk[1], NULL, NULL, NULL) == 1); + /** Serialize and parse public nonces **/ ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(none, &verifier_session, verifier_signer_data, msg, &combined_pk, &pre_session, ncs, 2) == 1); - CHECK(ecount == 0); - 
CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(none, NULL, verifier_signer_data, msg, &combined_pk, &pre_session, ncs, 2) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_serialize(none, pubnonce_ser, &pubnonce[0]) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_serialize(none, NULL, &pubnonce[0]) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(none, &verifier_session, verifier_signer_data, NULL, &combined_pk, &pre_session, ncs, 2) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_serialize(none, pubnonce_ser, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(none, &verifier_session, verifier_signer_data, msg, NULL, &pre_session, ncs, 2) == 0); + CHECK(memcmp(zeros68, pubnonce_ser, sizeof(pubnonce_ser)) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_serialize(none, pubnonce_ser, &invalid_pubnonce) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(none, &verifier_session, verifier_signer_data, msg, &combined_pk, NULL, ncs, 2) == 0); - CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(none, &verifier_session, verifier_signer_data, msg, &combined_pk, &pre_session, NULL, 2) == 0); - CHECK(ecount == 5); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(none, &verifier_session, verifier_signer_data, msg, &combined_pk, &pre_session, ncs, 0) == 0); - CHECK(ecount == 6); - if (SIZE_MAX > UINT32_MAX) { - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(none, &verifier_session, verifier_signer_data, msg, &combined_pk, &pre_session, ncs, ((size_t) UINT32_MAX) + 2) == 0); - CHECK(ecount == 7); - } else { - ecount = 7; - } - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init_verifier(none, &verifier_session, verifier_signer_data, msg, &combined_pk, &pre_session, ncs, 2) == 1); + CHECK(memcmp(zeros68, pubnonce_ser, sizeof(pubnonce_ser)) == 0); + 
CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_serialize(none, pubnonce_ser, &pubnonce[0]) == 1); - /** Signing step 0 -- exchange nonce commitments */ ecount = 0; - { - unsigned char nonce[32]; - rustsecp256k1zkp_v0_4_0_musig_session session_0_tmp; - - memcpy(&session_0_tmp, &session[0], sizeof(session_0_tmp)); - - /* Can obtain public nonce after commitments have been exchanged; still can't sign */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, &session_0_tmp, signer0, nonce, ncs, 2, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &session_0_tmp, &partial_sig[0]) == 0); - CHECK(ecount == 1); - } + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_parse(none, &pubnonce[0], pubnonce_ser) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_parse(none, NULL, pubnonce_ser) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_parse(none, &pubnonce[0], NULL) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_parse(none, &pubnonce[0], zeros68) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubnonce_parse(none, &pubnonce[0], pubnonce_ser) == 1); - /** Signing step 1 -- exchange nonces */ + /** Receive nonces and aggregate**/ ecount = 0; - { - unsigned char public_nonce[3][32]; - rustsecp256k1zkp_v0_4_0_musig_session session_0_tmp; - - memcpy(&session_0_tmp, &session[0], sizeof(session_0_tmp)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, &session_0_tmp, signer0, public_nonce[0], ncs, 2, NULL) == 1); - CHECK(ecount == 0); - /* Reset session */ - memcpy(&session_0_tmp, &session[0], sizeof(session_0_tmp)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, NULL, signer0, public_nonce[0], ncs, 2, NULL) == 0); - CHECK(ecount == 1); - /* uninitialized session */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, &session_uninitialized, signer0, public_nonce[0], ncs, 2, NULL) == 0); - CHECK(ecount == 2); - 
CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, &session_0_tmp, NULL, public_nonce[0], ncs, 2, NULL) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, &session_0_tmp, signer0, NULL, ncs, 2, NULL) == 0); - CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, &session_0_tmp, signer0, public_nonce[0], NULL, 2, NULL) == 0); - CHECK(ecount == 5); - /* Number of commitments and number of signers are different */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, &session_0_tmp, signer0, public_nonce[0], ncs, 1, NULL) == 0); - CHECK(ecount == 6); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(none, &aggnonce, pubnonce_ptr, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(none, NULL, pubnonce_ptr, 2) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(none, &aggnonce, NULL, 2) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(none, &aggnonce, pubnonce_ptr, 0) == 0); + CHECK(ecount == 3); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(none, &aggnonce, invalid_pubnonce_ptr, 1) == 0); + CHECK(ecount == 4); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(none, &aggnonce, inf_pubnonce_ptr, 2) == 0); + CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, &session[0], signer0, public_nonce[0], ncs, 2, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(none, &session[1], signer1, public_nonce[1], ncs, 2, NULL) == 1); + /** Serialize and parse aggregate nonces **/ + ecount = 0; + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_serialize(none, aggnonce_ser, &aggnonce) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_serialize(none, NULL, &aggnonce) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_serialize(none, aggnonce_ser, NULL) == 0); + CHECK(ecount == 2); + CHECK(memcmp(zeros68, aggnonce_ser, 
sizeof(aggnonce_ser)) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_serialize(none, aggnonce_ser, (rustsecp256k1zkp_v0_4_0_musig_aggnonce*) &invalid_pubnonce) == 0); + CHECK(ecount == 3); + CHECK(memcmp(zeros68, aggnonce_ser, sizeof(aggnonce_ser)) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_serialize(none, aggnonce_ser, &aggnonce) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, &signer0[0], public_nonce[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, &signer0[1], public_nonce[0]) == 0); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, &signer0[1], public_nonce[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, &signer0[1], public_nonce[1]) == 1); - CHECK(ecount == 6); + ecount = 0; + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse(none, &aggnonce, aggnonce_ser) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse(none, NULL, aggnonce_ser) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse(none, &aggnonce, NULL) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse(none, &aggnonce, zeros68) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse(none, &aggnonce, aggnonce_ser) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, NULL, public_nonce[0]) == 0); - CHECK(ecount == 7); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, &signer1[0], NULL) == 0); - CHECK(ecount == 8); - - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, &signer1[0], public_nonce[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, &signer1[1], public_nonce[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, &verifier_signer_data[0], public_nonce[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(none, &verifier_signer_data[1], public_nonce[1]) == 1); - - ecount = 0; - memcpy(&session_0_tmp, &session[0], sizeof(session_0_tmp)); - 
CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, &session_0_tmp, signer0, 2, &combined_nonce_parity, &adaptor) == 1); - memcpy(&session_0_tmp, &session[0], sizeof(session_0_tmp)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, NULL, signer0, 2, &combined_nonce_parity, &adaptor) == 0); - CHECK(ecount == 1); - /* Uninitialized session */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, &session_uninitialized, signer0, 2, &combined_nonce_parity, &adaptor) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, &session_0_tmp, NULL, 2, &combined_nonce_parity, &adaptor) == 0); - CHECK(ecount == 3); - /* Number of signers differs from number during intialization */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, &session_0_tmp, signer0, 1, &combined_nonce_parity, &adaptor) == 0); - CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, &session_0_tmp, signer0, 2, NULL, &adaptor) == 1); - CHECK(ecount == 4); - memcpy(&session_0_tmp, &session[0], sizeof(session_0_tmp)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, &session_0_tmp, signer0, 2, &combined_nonce_parity, NULL) == 1); + /** Process nonces **/ + ecount = 0; + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(none, &session, &aggnonce, msg, &keyagg_cache, &adaptor) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(sign, &session, &aggnonce, msg, &keyagg_cache, &adaptor) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(vrfy, NULL, &aggnonce, msg, &keyagg_cache, &adaptor) == 0); + CHECK(ecount == 3); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(vrfy, &session, NULL, msg, &keyagg_cache, &adaptor) == 0); + CHECK(ecount == 4); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(vrfy, &session, (rustsecp256k1zkp_v0_4_0_musig_aggnonce*) &invalid_pubnonce, msg, &keyagg_cache, &adaptor) == 
0); + CHECK(ecount == 5); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(vrfy, &session, &aggnonce, NULL, &keyagg_cache, &adaptor) == 0); + CHECK(ecount == 6); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(vrfy, &session, &aggnonce, msg, NULL, &adaptor) == 0); + CHECK(ecount == 7); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(vrfy, &session, &aggnonce, msg, &invalid_keyagg_cache, &adaptor) == 0); + CHECK(ecount == 8); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(vrfy, &session, &aggnonce, msg, &keyagg_cache, NULL) == 1); + CHECK(ecount == 8); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(vrfy, &session, &aggnonce, msg, &keyagg_cache, (rustsecp256k1zkp_v0_4_0_pubkey *)&invalid_pk) == 0); + CHECK(ecount == 9); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, &session[0], signer0, 2, &combined_nonce_parity, &adaptor) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, &session[1], signer0, 2, &combined_nonce_parity, &adaptor) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(none, &verifier_session, verifier_signer_data, 2, &combined_nonce_parity, &adaptor) == 1); - } + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(vrfy, &session, &aggnonce, msg, &keyagg_cache, &adaptor) == 1); - /** Signing step 2 -- partial signatures */ ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &session[0], &partial_sig[0]) == 1); - CHECK(ecount == 0); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, NULL, &partial_sig[0]) == 0); + memcpy(&secnonce_tmp, &secnonce[0], sizeof(secnonce_tmp)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &secnonce_tmp, &keypair[0], &keyagg_cache, &session) == 1); + /* The session_id is set to 0 and subsequent signing attempts fail */ + CHECK(memcmp(&secnonce_tmp, zeros68, sizeof(secnonce_tmp)) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &secnonce_tmp, &keypair[0], &keyagg_cache, 
&session) == 0); CHECK(ecount == 1); - /* Uninitialized session */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &session_uninitialized, &partial_sig[0]) == 0); + memcpy(&secnonce_tmp, &secnonce[0], sizeof(secnonce_tmp)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, NULL, &secnonce_tmp, &keypair[0], &keyagg_cache, &session) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &session[0], NULL) == 0); + memcpy(&secnonce_tmp, &secnonce[0], sizeof(secnonce_tmp)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], NULL, &keypair[0], &keyagg_cache, &session) == 0); CHECK(ecount == 3); - - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &session[0], &partial_sig[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &session[1], &partial_sig[1]) == 1); - /* observer can't sign */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &verifier_session, &partial_sig[2]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &invalid_secnonce, &keypair[0], &keyagg_cache, &session) == 0); CHECK(ecount == 4); + memcpy(&secnonce_tmp, &secnonce[0], sizeof(secnonce_tmp)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &secnonce_tmp, NULL, &keyagg_cache, &session) == 0); + CHECK(ecount == 5); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &secnonce_tmp, &invalid_keypair, &keyagg_cache, &session) == 0); + CHECK(ecount == 6); + memcpy(&secnonce_tmp, &secnonce[0], sizeof(secnonce_tmp)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &secnonce_tmp, &keypair[0], NULL, &session) == 0); + CHECK(ecount == 7); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &secnonce_tmp, &keypair[0], &invalid_keyagg_cache, &session) == 0); + CHECK(ecount == 8); + memcpy(&secnonce_tmp, &secnonce[0], sizeof(secnonce_tmp)); + 
CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &secnonce_tmp, &keypair[0], &keyagg_cache, NULL) == 0); + CHECK(ecount == 9); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &secnonce_tmp, &keypair[0], &keyagg_cache, &invalid_session) == 0); + CHECK(ecount == 10); + + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[0], &secnonce[0], &keypair[0], &keyagg_cache, &session) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(none, &partial_sig[1], &secnonce[1], &keypair[1], &keyagg_cache, &session) == 1); ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_signature_serialize(none, buf, &partial_sig[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_signature_serialize(none, NULL, &partial_sig[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_serialize(none, buf, &partial_sig[0]) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_serialize(none, NULL, &partial_sig[0]) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_signature_serialize(none, buf, NULL) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_serialize(none, buf, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_signature_parse(none, &partial_sig[0], buf) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_signature_parse(none, NULL, buf) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_parse(none, &partial_sig[0], buf) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_parse(none, NULL, buf) == 0); + CHECK(ecount == 3); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_parse(none, &partial_sig[0], max64) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_signature_parse(none, &partial_sig[0], NULL) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_parse(none, &partial_sig[0], NULL) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_signature_parse(none, &partial_sig_overflow, ones) 
== 1); /** Partial signature verification */ ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(none, &session[0], &signer0[0], &partial_sig[0], &pk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(none, &partial_sig[0], &pubnonce[0], &pk[0], &keyagg_cache, &session) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(sign, &session[0], &signer0[0], &partial_sig[0], &pk[0]) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[0], &signer0[0], &partial_sig[0], &pk[0]) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(sign, &partial_sig[0], &pubnonce[0], &pk[0], &keyagg_cache, &session) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[0], &signer0[0], &partial_sig[1], &pk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], &pubnonce[0], &pk[0], &keyagg_cache, &session) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, NULL, &signer0[0], &partial_sig[0], &pk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, NULL, &pubnonce[0], &pk[0], &keyagg_cache, &session) == 0); CHECK(ecount == 3); - /* Unitialized session */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session_uninitialized, &signer0[0], &partial_sig[0], &pk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &invalid_partial_sig, &pubnonce[0], &pk[0], &keyagg_cache, &session) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[0], NULL, &partial_sig[0], &pk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], NULL, &pk[0], &keyagg_cache, &session) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[0], &signer0[0], NULL, &pk[0]) == 0); + 
CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], &invalid_pubnonce, &pk[0], &keyagg_cache, &session) == 0); CHECK(ecount == 6); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[0], &signer0[0], &partial_sig_overflow, &pk[0]) == 0); - CHECK(ecount == 6); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[0], &signer0[0], &partial_sig[0], NULL) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], &pubnonce[0], NULL, &keyagg_cache, &session) == 0); CHECK(ecount == 7); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], &pubnonce[0], &invalid_pk, &keyagg_cache, &session) == 0); + CHECK(ecount == 8); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], &pubnonce[0], &pk[0], NULL, &session) == 0); + CHECK(ecount == 9); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], &pubnonce[0], &pk[0], &invalid_keyagg_cache, &session) == 0); + CHECK(ecount == 10); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], &pubnonce[0], &pk[0], &keyagg_cache, NULL) == 0); + CHECK(ecount == 11); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], &pubnonce[0], &pk[0], &keyagg_cache, &invalid_session) == 0); + CHECK(ecount == 12); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[0], &signer0[0], &partial_sig[0], &pk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[1], &signer1[0], &partial_sig[0], &pk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[0], &signer0[1], &partial_sig[1], &pk[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &session[1], &signer1[1], &partial_sig[1], &pk[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &verifier_session, &verifier_signer_data[0], &partial_sig[0], &pk[0]) == 1); - 
CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &verifier_session, &verifier_signer_data[1], &partial_sig[1], &pk[1]) == 1); - CHECK(ecount == 7); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[0], &pubnonce[0], &pk[0], &keyagg_cache, &session) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(vrfy, &partial_sig[1], &pubnonce[1], &pk[1], &keyagg_cache, &session) == 1); - /** Adaptor signature verification */ - memcpy(&partial_sig_adapted[1], &partial_sig[1], sizeof(partial_sig_adapted[1])); + /** Sign aggregation and verification */ ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt(none, &partial_sig_adapted[0], &partial_sig[0], sec_adaptor, combined_nonce_parity) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt(none, NULL, &partial_sig[0], sec_adaptor, 0) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(none, pre_sig, &session, partial_sig_ptr, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(none, NULL, &session, partial_sig_ptr, 2) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt(none, &partial_sig_adapted[0], NULL, sec_adaptor, 0) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(none, pre_sig, NULL, partial_sig_ptr, 2) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt(none, &partial_sig_adapted[0], &partial_sig_overflow, sec_adaptor, combined_nonce_parity) == 0); - CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt(none, &partial_sig_adapted[0], &partial_sig[0], NULL, 0) == 0); - CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt(none, &partial_sig_adapted[0], &partial_sig[0], ones, combined_nonce_parity) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(none, pre_sig, &invalid_session, partial_sig_ptr, 2) == 0); CHECK(ecount == 3); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(none, pre_sig, &session, 
NULL, 2) == 0); + CHECK(ecount == 4); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(none, pre_sig, &session, invalid_partial_sig_ptr, 2) == 0); + CHECK(ecount == 5); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(none, pre_sig, &session, partial_sig_ptr, 0) == 1); - /** Signing combining and verification */ - ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, &session[0], final_sig, partial_sig_adapted, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, &session[0], final_sig_cmp, partial_sig_adapted, 2) == 1); - CHECK(memcmp(final_sig, final_sig_cmp, sizeof(final_sig)) == 0); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, &session[0], final_sig_cmp, partial_sig_adapted, 2) == 1); - CHECK(memcmp(final_sig, final_sig_cmp, sizeof(final_sig)) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(none, pre_sig, &session, partial_sig_ptr, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, NULL, final_sig, partial_sig_adapted, 2) == 0); + /** Adaptor signature verification */ + ecount = 0; + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_parity(none, &nonce_parity, &session) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_parity(none, NULL, &session) == 0); CHECK(ecount == 1); - /* Unitialized session */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, &session_uninitialized, final_sig, partial_sig_adapted, 2) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_parity(none, &nonce_parity, NULL) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, &session[0], NULL, partial_sig_adapted, 2) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_parity(none, &nonce_parity, &invalid_session) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, &session[0], final_sig, NULL, 2) == 0); - CHECK(ecount == 4); + + ecount = 0; { - rustsecp256k1zkp_v0_4_0_musig_partial_signature 
partial_sig_tmp[2]; - partial_sig_tmp[0] = partial_sig_adapted[0]; - partial_sig_tmp[1] = partial_sig_overflow; - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, &session[0], final_sig, partial_sig_tmp, 2) == 0); + unsigned char tmp_sig[64]; + memcpy(tmp_sig, pre_sig, sizeof(tmp_sig)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_adapt(none, tmp_sig, sec_adaptor, nonce_parity) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_adapt(none, NULL, sec_adaptor, 0) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_adapt(none, max64, sec_adaptor, 0) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_adapt(none, tmp_sig, NULL, 0) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_musig_adapt(none, tmp_sig, max64, nonce_parity) == 0); + CHECK(ecount == 2); } - CHECK(ecount == 4); - /* Wrong number of partial sigs */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, &session[0], final_sig, partial_sig_adapted, 1) == 0); - CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(none, &session[0], final_sig, partial_sig_adapted, 2) == 1); - CHECK(ecount == 4); - - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, final_sig, msg, &combined_pk) == 1); + memcpy(final_sig, pre_sig, sizeof(final_sig)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_adapt(none, final_sig, sec_adaptor, nonce_parity) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, final_sig, msg, sizeof(msg), &agg_pk) == 1); /** Secret adaptor can be extracted from signature */ ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(none, sec_adaptor1, final_sig, partial_sig, 2, combined_nonce_parity) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_adaptor(none, sec_adaptor1, final_sig, pre_sig, nonce_parity) == 1); CHECK(memcmp(sec_adaptor, sec_adaptor1, 32) == 0); - CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(none, NULL, final_sig, partial_sig, 2, 0) == 0); + 
CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_adaptor(none, NULL, final_sig, pre_sig, 0) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(none, sec_adaptor1, NULL, partial_sig, 2, 0) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_adaptor(none, sec_adaptor1, NULL, pre_sig, 0) == 0); CHECK(ecount == 2); - { - unsigned char final_sig_tmp[64]; - memcpy(final_sig_tmp, final_sig, sizeof(final_sig_tmp)); - memcpy(&final_sig_tmp[32], ones, 32); - CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(none, sec_adaptor1, final_sig_tmp, partial_sig, 2, combined_nonce_parity) == 0); - } + CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_adaptor(none, sec_adaptor1, max64, pre_sig, 0) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(none, sec_adaptor1, final_sig, NULL, 2, 0) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_adaptor(none, sec_adaptor1, final_sig, NULL, 0) == 0); CHECK(ecount == 3); - { - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig_tmp[2]; - partial_sig_tmp[0] = partial_sig[0]; - partial_sig_tmp[1] = partial_sig_overflow; - CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(none, sec_adaptor1, final_sig, partial_sig_tmp, 2, combined_nonce_parity) == 0); - } + CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_adaptor(none, sec_adaptor1, final_sig, max64, 0) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(none, sec_adaptor1, final_sig, partial_sig, 0, 0) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(none, sec_adaptor1, final_sig, partial_sig, 2, 1) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_adaptor(none, sec_adaptor1, final_sig, pre_sig, 1) == 1); /** cleanup **/ - memset(&session, 0, sizeof(session)); rustsecp256k1zkp_v0_4_0_context_destroy(none); rustsecp256k1zkp_v0_4_0_context_destroy(sign); rustsecp256k1zkp_v0_4_0_context_destroy(vrfy); } -/* Initializes two sessions, one use the given 
parameters (session_id, - * nonce_commitments, etc.) except that `session_tmp` uses new signers with different - * public keys. The point of this test is to call `musig_session_get_public_nonce` - * with signers from `session_tmp` who have different public keys than the correct - * ones and return the resulting messagehash. This should not result in a different - * messagehash because the public keys of the signers are only used during session - * initialization. */ -void musig_state_machine_diff_signer_msghash_test(unsigned char *msghash, rustsecp256k1zkp_v0_4_0_xonly_pubkey *pks, rustsecp256k1zkp_v0_4_0_xonly_pubkey *combined_pk, rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, const unsigned char * const *nonce_commitments, unsigned char *msg, unsigned char *nonce_other, unsigned char *sk, unsigned char *session_id) { - rustsecp256k1zkp_v0_4_0_musig_session session; - rustsecp256k1zkp_v0_4_0_musig_session session_tmp; - unsigned char nonce_commitment[32]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signers[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signers_tmp[2]; - unsigned char sk_dummy[32]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey pks_tmp[2]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey combined_pk_tmp; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session_tmp; - unsigned char nonce[32]; - - /* Set up signers with different public keys */ - rustsecp256k1zkp_v0_4_0_testrand256(sk_dummy); - pks_tmp[0] = pks[0]; - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pks_tmp[1], sk_dummy) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(ctx, NULL, &combined_pk_tmp, &pre_session_tmp, pks_tmp, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session_tmp, signers_tmp, nonce_commitment, session_id, msg, &combined_pk_tmp, &pre_session_tmp, 2, 1, sk_dummy) == 1); - - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session, signers, nonce_commitment, session_id, msg, combined_pk, pre_session, 2, 0, sk) == 1); - 
CHECK(memcmp(nonce_commitment, nonce_commitments[1], 32) == 0); - /* Call get_public_nonce with different signers than the signers the session was - * initialized with. */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session_tmp, signers, nonce, nonce_commitments, 2, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session, signers_tmp, nonce, nonce_commitments, 2, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers[0], nonce_other) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers[1], nonce) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session, signers, 2, NULL, NULL) == 1); - - rustsecp256k1zkp_v0_4_0_musig_compute_messagehash(ctx, msghash, &session); -} +void musig_nonce_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) { + rustsecp256k1zkp_v0_4_0_scalar k1[2], k2[2]; -/* Creates a new session (with a different session id) and tries to use that session - * to combine nonces with given signers_other. This should fail, because the nonce - * commitments of signers_other do not match the nonce commitments the new session - * was initialized with. If do_test is 0, the correct signers are being used and - * therefore the function should return 1. 
*/ -int musig_state_machine_diff_signers_combine_nonce_test(rustsecp256k1zkp_v0_4_0_xonly_pubkey *combined_pk, rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, unsigned char *nonce_commitment_other, unsigned char *nonce_other, unsigned char *msg, unsigned char *sk, rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers_other, int do_test) { - rustsecp256k1zkp_v0_4_0_musig_session session; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signers[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data *signers_to_use; - unsigned char nonce_commitment[32]; - unsigned char session_id[32]; - unsigned char nonce[32]; - const unsigned char *ncs[2]; - - /* Initialize new signers */ - rustsecp256k1zkp_v0_4_0_testrand256(session_id); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session, signers, nonce_commitment, session_id, msg, combined_pk, pre_session, 2, 1, sk) == 1); - ncs[0] = nonce_commitment_other; - ncs[1] = nonce_commitment; - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session, signers, nonce, ncs, 2, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers[0], nonce_other) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers[1], nonce) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers[1], nonce) == 1); - rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session, signers_other, 2, NULL, NULL); - if (do_test) { - signers_to_use = signers_other; - } else { - signers_to_use = signers; - } - return rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session, signers_to_use, 2, NULL, NULL); + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k1, args[0], args[1], args[2], args[3], args[4]); + rustsecp256k1zkp_v0_4_0_testrand_flip(args[n_flip], n_bytes); + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k2, args[0], args[1], args[2], args[3], args[4]); + CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&k1[0], &k2[0]) == 0); + 
CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&k1[1], &k2[1]) == 0); } -/* Initializaes a session with the given session_id, signers, pk, msg etc. - * parameters but without a message. Will test that the message must be - * provided with `get_public_nonce`. - */ -void musig_state_machine_late_msg_test(rustsecp256k1zkp_v0_4_0_xonly_pubkey *pks, rustsecp256k1zkp_v0_4_0_xonly_pubkey *combined_pk, rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session, unsigned char *nonce_commitment_other, unsigned char *nonce_other, unsigned char *sk, unsigned char *session_id, unsigned char *msg) { - /* Create context for testing ARG_CHECKs by setting an illegal_callback. */ - rustsecp256k1zkp_v0_4_0_context *ctx_tmp = rustsecp256k1zkp_v0_4_0_context_create(SECP256K1_CONTEXT_NONE); - int ecount = 0; - rustsecp256k1zkp_v0_4_0_musig_session session; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signers[2]; - unsigned char nonce_commitment[32]; - const unsigned char *ncs[2]; - unsigned char nonce[32]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig; - - rustsecp256k1zkp_v0_4_0_context_set_illegal_callback(ctx_tmp, counting_illegal_callback_fn, &ecount); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session, signers, nonce_commitment, session_id, NULL, combined_pk, pre_session, 2, 1, sk) == 1); - ncs[0] = nonce_commitment_other; - ncs[1] = nonce_commitment; - - /* Trying to get the nonce without providing a message fails. */ - CHECK(ecount == 0); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx_tmp, &session, signers, nonce, ncs, 2, NULL) == 0); - CHECK(ecount == 1); - - /* Providing a message should make get_public_nonce succeed. */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session, signers, nonce, ncs, 2, msg) == 1); - /* Trying to set the message again fails. 
*/ - CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx_tmp, &session, signers, nonce, ncs, 2, msg) == 0); - CHECK(ecount == 2); - - /* Check that it's working */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers[0], nonce_other) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers[1], nonce) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session, signers, 2, NULL, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &session, &partial_sig)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &session, &signers[1], &partial_sig, &pks[1])); - rustsecp256k1zkp_v0_4_0_context_destroy(ctx_tmp); +void musig_nonce_null(unsigned char **args, size_t n_flip) { + rustsecp256k1zkp_v0_4_0_scalar k1[2], k2[2]; + unsigned char *args_tmp; + + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k1, args[0], args[1], args[2], args[3], args[4]); + args_tmp = args[n_flip]; + args[n_flip] = NULL; + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k2, args[0], args[1], args[2], args[3], args[4]); + CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&k1[0], &k2[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&k1[1], &k2[1]) == 0); + args[n_flip] = args_tmp; } -void musig_state_machine_tests(rustsecp256k1zkp_v0_4_0_scratch_space *scratch) { - rustsecp256k1zkp_v0_4_0_context *ctx_tmp = rustsecp256k1zkp_v0_4_0_context_create(SECP256K1_CONTEXT_VERIFY | SECP256K1_CONTEXT_VERIFY); - size_t i; - rustsecp256k1zkp_v0_4_0_musig_session session[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signers0[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signers1[2]; - unsigned char nonce_commitment[2][32]; - unsigned char session_id[2][32]; +void musig_nonce_test(void) { + unsigned char *args[5]; + unsigned char session_id[32]; + unsigned char sk[32]; unsigned char msg[32]; - unsigned char sk[2][32]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey pk[2]; - rustsecp256k1zkp_v0_4_0_xonly_pubkey 
combined_pk; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session; - unsigned char nonce[2][32]; - const unsigned char *ncs[2]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig[2]; - unsigned char sig[64]; - unsigned char msghash1[32]; - unsigned char msghash2[32]; - int ecount; - - rustsecp256k1zkp_v0_4_0_context_set_illegal_callback(ctx_tmp, counting_illegal_callback_fn, &ecount); - ecount = 0; - - /* Run state machine with the same objects twice to test that it's allowed to - * reinitialize session and session_signer_data. */ - for (i = 0; i < 2; i++) { - /* Setup */ - rustsecp256k1zkp_v0_4_0_testrand256(session_id[0]); - rustsecp256k1zkp_v0_4_0_testrand256(session_id[1]); - rustsecp256k1zkp_v0_4_0_testrand256(sk[0]); - rustsecp256k1zkp_v0_4_0_testrand256(sk[1]); - rustsecp256k1zkp_v0_4_0_testrand256(msg); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[0], sk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[1], sk[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(ctx, scratch, &combined_pk, &pre_session, pk, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session[0], signers0, nonce_commitment[0], session_id[0], msg, &combined_pk, &pre_session, 2, 0, sk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session[1], signers1, nonce_commitment[1], session_id[1], msg, &combined_pk, &pre_session, 2, 1, sk[1]) == 1); - /* Can't combine nonces unless we're through round 1 already */ - ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx_tmp, &session[0], signers0, 2, NULL, NULL) == 0); - CHECK(ecount == 1); - - /* Set nonce commitments */ - ncs[0] = nonce_commitment[0]; - ncs[1] = nonce_commitment[1]; - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session[0], signers0, nonce[0], ncs, 2, NULL) == 1); - /* Calling the function again is not okay */ - ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx_tmp, 
&session[0], signers0, nonce[0], ncs, 2, NULL) == 0); - CHECK(ecount == 1); - - /* Get nonce for signer 1 */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session[1], signers1, nonce[1], ncs, 2, NULL) == 1); - - /* Set nonces */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers0[0], nonce[0]) == 1); - /* Can't set nonce that doesn't match nonce commitment */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers0[1], nonce[0]) == 0); - /* Set correct nonce */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers0[1], nonce[1]) == 1); - - /* Combine nonces */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session[0], signers0, 2, NULL, NULL) == 1); - /* Not everyone is present from signer 1's view */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session[1], signers1, 2, NULL, NULL) == 0); - /* Make everyone present */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers1[0], nonce[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers1[1], nonce[1]) == 1); - - /* Can't combine nonces from signers of a different session */ - CHECK(musig_state_machine_diff_signers_combine_nonce_test(&combined_pk, &pre_session, nonce_commitment[0], nonce[0], msg, sk[1], signers1, 1) == 0); - CHECK(musig_state_machine_diff_signers_combine_nonce_test(&combined_pk, &pre_session, nonce_commitment[0], nonce[0], msg, sk[1], signers1, 0) == 1); - - /* Partially sign */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &session[0], &partial_sig[0]) == 1); - /* Can't verify, sign or combine signatures until nonce is combined */ - ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx_tmp, &session[1], &signers1[0], &partial_sig[0], &pk[0]) == 0); - CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx_tmp, &session[1], &partial_sig[1]) == 0); - CHECK(ecount == 2); - memset(&partial_sig[1], 0, sizeof(partial_sig[1])); - 
CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(ctx_tmp, &session[1], sig, partial_sig, 2) == 0); - CHECK(ecount == 3); - - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session[1], signers1, 2, NULL, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &session[1], &signers1[0], &partial_sig[0], &pk[0]) == 1); - /* messagehash should be the same as a session whose get_public_nonce was called - * with different signers (i.e. they diff in public keys). This is because the - * public keys of the signers is set in stone when initializing the session. */ - rustsecp256k1zkp_v0_4_0_musig_compute_messagehash(ctx, msghash1, &session[1]); - musig_state_machine_diff_signer_msghash_test(msghash2, pk, &combined_pk, &pre_session, ncs, msg, nonce[0], sk[1], session_id[1]); - CHECK(memcmp(msghash1, msghash2, 32) == 0); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &session[1], &partial_sig[1]) == 1); - - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &session[1], &signers1[1], &partial_sig[1], &pk[1]) == 1); - /* Wrong signature */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &session[1], &signers1[1], &partial_sig[0], &pk[1]) == 0); - /* Can't get the public nonce until msg is set */ - musig_state_machine_late_msg_test(pk, &combined_pk, &pre_session, nonce_commitment[0], nonce[0], sk[1], session_id[1], msg); + unsigned char agg_pk[32]; + unsigned char extra_input[32]; + int i, j; + rustsecp256k1zkp_v0_4_0_scalar k[5][2]; + + rustsecp256k1zkp_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1zkp_v0_4_0_test_rng, session_id, sizeof(session_id)); + rustsecp256k1zkp_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1zkp_v0_4_0_test_rng, sk, sizeof(sk)); + rustsecp256k1zkp_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1zkp_v0_4_0_test_rng, msg, sizeof(msg)); + rustsecp256k1zkp_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1zkp_v0_4_0_test_rng, agg_pk, sizeof(agg_pk)); + 
rustsecp256k1zkp_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1zkp_v0_4_0_test_rng, extra_input, sizeof(extra_input)); + + /* Check that a bitflip in an argument results in different nonces. */ + args[0] = session_id; + args[1] = sk; + args[2] = msg; + args[3] = agg_pk; + args[4] = extra_input; + for (i = 0; i < count; i++) { + musig_nonce_bitflip(args, 0, sizeof(session_id)); + musig_nonce_bitflip(args, 1, sizeof(sk)); + musig_nonce_bitflip(args, 2, sizeof(msg)); + musig_nonce_bitflip(args, 3, sizeof(agg_pk)); + musig_nonce_bitflip(args, 4, sizeof(extra_input)); + } + /* Check that if any argument is NULL, a different nonce is produced than if + * any other argument is NULL. */ + memcpy(sk, session_id, sizeof(sk)); + memcpy(msg, session_id, sizeof(msg)); + memcpy(agg_pk, session_id, sizeof(agg_pk)); + memcpy(extra_input, session_id, sizeof(extra_input)); + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k[0], args[0], args[1], args[2], args[3], args[4]); + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k[1], args[0], NULL, args[2], args[3], args[4]); + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k[2], args[0], args[1], NULL, args[3], args[4]); + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k[3], args[0], args[1], args[2], NULL, args[4]); + rustsecp256k1zkp_v0_4_0_nonce_function_musig(k[4], args[0], args[1], args[2], args[3], NULL); + for (i = 0; i < 4; i++) { + for (j = i+1; j < 5; j++) { + CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&k[i][0], &k[j][0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&k[i][1], &k[j][1]) == 0); + } } - rustsecp256k1zkp_v0_4_0_context_destroy(ctx_tmp); } void scriptless_atomic_swap(rustsecp256k1zkp_v0_4_0_scratch_space *scratch) { @@ -725,121 +610,115 @@ void scriptless_atomic_swap(rustsecp256k1zkp_v0_4_0_scratch_space *scratch) { * sending a-coins to signer 1, while signer 1 is sending b-coins to signer * 0. Signer 0 produces the adaptor signatures. 
*/ unsigned char final_sig_a[64]; + unsigned char pre_sig_b[64]; unsigned char final_sig_b[64]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig_a[2]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig_b_adapted[2]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig_b[2]; + rustsecp256k1zkp_v0_4_0_musig_partial_sig partial_sig_a[2]; + const rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sig_a_ptr[2]; + rustsecp256k1zkp_v0_4_0_musig_partial_sig partial_sig_b[2]; + const rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sig_b_ptr[2]; unsigned char sec_adaptor[32]; unsigned char sec_adaptor_extracted[32]; rustsecp256k1zkp_v0_4_0_pubkey pub_adaptor; - - unsigned char seckey_a[2][32]; - unsigned char seckey_b[2][32]; + unsigned char sk_a[2][32]; + unsigned char sk_b[2][32]; + rustsecp256k1zkp_v0_4_0_keypair keypair_a[2]; + rustsecp256k1zkp_v0_4_0_keypair keypair_b[2]; rustsecp256k1zkp_v0_4_0_xonly_pubkey pk_a[2]; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk_a_ptr[2]; rustsecp256k1zkp_v0_4_0_xonly_pubkey pk_b[2]; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session_a; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session_b; - rustsecp256k1zkp_v0_4_0_xonly_pubkey combined_pk_a; - rustsecp256k1zkp_v0_4_0_xonly_pubkey combined_pk_b; - rustsecp256k1zkp_v0_4_0_musig_session musig_session_a[2]; - rustsecp256k1zkp_v0_4_0_musig_session musig_session_b[2]; - unsigned char noncommit_a[2][32]; - unsigned char noncommit_b[2][32]; - const unsigned char *noncommit_a_ptr[2]; - const unsigned char *noncommit_b_ptr[2]; - unsigned char pubnon_a[2][32]; - unsigned char pubnon_b[2][32]; - int combined_nonce_parity_a; - int combined_nonce_parity_b; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data data_a[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data data_b[2]; - - const unsigned char seed[32] = "still tired of choosing seeds..."; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk_b_ptr[2]; + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache 
keyagg_cache_a; + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache keyagg_cache_b; + rustsecp256k1zkp_v0_4_0_xonly_pubkey agg_pk_a; + rustsecp256k1zkp_v0_4_0_xonly_pubkey agg_pk_b; + rustsecp256k1zkp_v0_4_0_musig_secnonce secnonce_a[2]; + rustsecp256k1zkp_v0_4_0_musig_secnonce secnonce_b[2]; + rustsecp256k1zkp_v0_4_0_musig_pubnonce pubnonce_a[2]; + rustsecp256k1zkp_v0_4_0_musig_pubnonce pubnonce_b[2]; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce_ptr_a[2]; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce_ptr_b[2]; + rustsecp256k1zkp_v0_4_0_musig_aggnonce aggnonce_a; + rustsecp256k1zkp_v0_4_0_musig_aggnonce aggnonce_b; + rustsecp256k1zkp_v0_4_0_musig_session session_a, session_b; + int nonce_parity_a; + int nonce_parity_b; + unsigned char seed_a[2][32] = { "a0", "a1" }; + unsigned char seed_b[2][32] = { "b0", "b1" }; const unsigned char msg32_a[32] = "this is the message blockchain a"; const unsigned char msg32_b[32] = "this is the message blockchain b"; + int i; /* Step 1: key setup */ - rustsecp256k1zkp_v0_4_0_testrand256(seckey_a[0]); - rustsecp256k1zkp_v0_4_0_testrand256(seckey_a[1]); - rustsecp256k1zkp_v0_4_0_testrand256(seckey_b[0]); - rustsecp256k1zkp_v0_4_0_testrand256(seckey_b[1]); + for (i = 0; i < 2; i++) { + pk_a_ptr[i] = &pk_a[i]; + pk_b_ptr[i] = &pk_b[i]; + pubnonce_ptr_a[i] = &pubnonce_a[i]; + pubnonce_ptr_b[i] = &pubnonce_b[i]; + partial_sig_a_ptr[i] = &partial_sig_a[i]; + partial_sig_b_ptr[i] = &partial_sig_b[i]; + + rustsecp256k1zkp_v0_4_0_testrand256(sk_a[i]); + rustsecp256k1zkp_v0_4_0_testrand256(sk_b[i]); + CHECK(create_keypair_and_pk(&keypair_a[i], &pk_a[i], sk_a[i])); + CHECK(create_keypair_and_pk(&keypair_b[i], &pk_b[i], sk_b[i])); + } rustsecp256k1zkp_v0_4_0_testrand256(sec_adaptor); - - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk_a[0], seckey_a[0])); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk_a[1], seckey_a[1])); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk_b[0], seckey_b[0])); - 
CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk_b[1], seckey_b[1])); CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_create(ctx, &pub_adaptor, sec_adaptor)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(ctx, scratch, &combined_pk_a, &pre_session_a, pk_a, 2)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(ctx, scratch, &combined_pk_b, &pre_session_b, pk_b, 2)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(ctx, scratch, &agg_pk_a, &keyagg_cache_a, pk_a_ptr, 2)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(ctx, scratch, &agg_pk_b, &keyagg_cache_b, pk_b_ptr, 2)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &musig_session_a[0], data_a, noncommit_a[0], seed, msg32_a, &combined_pk_a, &pre_session_a, 2, 0, seckey_a[0])); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &musig_session_a[1], data_a, noncommit_a[1], seed, msg32_a, &combined_pk_a, &pre_session_a, 2, 1, seckey_a[1])); - noncommit_a_ptr[0] = noncommit_a[0]; - noncommit_a_ptr[1] = noncommit_a[1]; - - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &musig_session_b[0], data_b, noncommit_b[0], seed, msg32_b, &combined_pk_b, &pre_session_b, 2, 0, seckey_b[0])); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &musig_session_b[1], data_b, noncommit_b[1], seed, msg32_b, &combined_pk_b, &pre_session_b, 2, 1, seckey_b[1])); - noncommit_b_ptr[0] = noncommit_b[0]; - noncommit_b_ptr[1] = noncommit_b[1]; + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &secnonce_a[0], &pubnonce_a[0], seed_a[0], sk_a[0], NULL, NULL, NULL)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &secnonce_a[1], &pubnonce_a[1], seed_a[1], sk_a[1], NULL, NULL, NULL)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &secnonce_b[0], &pubnonce_b[0], seed_b[0], sk_b[0], NULL, NULL, NULL)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &secnonce_b[1], &pubnonce_b[1], seed_b[1], sk_b[1], NULL, NULL, NULL)); /* Step 2: Exchange nonces */ - 
CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &musig_session_a[0], data_a, pubnon_a[0], noncommit_a_ptr, 2, NULL)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &musig_session_a[1], data_a, pubnon_a[1], noncommit_a_ptr, 2, NULL)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &musig_session_b[0], data_b, pubnon_b[0], noncommit_b_ptr, 2, NULL)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &musig_session_b[1], data_b, pubnon_b[1], noncommit_b_ptr, 2, NULL)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &data_a[0], pubnon_a[0])); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &data_a[1], pubnon_a[1])); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &data_b[0], pubnon_b[0])); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &data_b[1], pubnon_b[1])); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &musig_session_a[0], data_a, 2, &combined_nonce_parity_a, &pub_adaptor)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &musig_session_a[1], data_a, 2, NULL, &pub_adaptor)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &musig_session_b[0], data_b, 2, &combined_nonce_parity_b, &pub_adaptor)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &musig_session_b[1], data_b, 2, NULL, &pub_adaptor)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(ctx, &aggnonce_a, pubnonce_ptr_a, 2)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(ctx, &session_a, &aggnonce_a, msg32_a, &keyagg_cache_a, &pub_adaptor)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_parity(ctx, &nonce_parity_a, &session_a)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(ctx, &aggnonce_b, pubnonce_ptr_b, 2)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(ctx, &session_b, &aggnonce_b, msg32_b, &keyagg_cache_b, &pub_adaptor)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_parity(ctx, &nonce_parity_b, &session_b)); /* Step 3: 
Signer 0 produces partial signatures for both chains. */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &musig_session_a[0], &partial_sig_a[0])); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &musig_session_b[0], &partial_sig_b[0])); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig_a[0], &secnonce_a[0], &keypair_a[0], &keyagg_cache_a, &session_a)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig_b[0], &secnonce_b[0], &keypair_b[0], &keyagg_cache_b, &session_b)); /* Step 4: Signer 1 receives partial signatures, verifies them and creates a * partial signature to send B-coins to signer 0. */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &musig_session_a[1], data_a, &partial_sig_a[0], &pk_a[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &musig_session_b[1], data_b, &partial_sig_b[0], &pk_b[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &musig_session_b[1], &partial_sig_b[1])); - - /* Step 5: Signer 0 adapts its own partial signature and combines it with the - * partial signature from signer 1. This results in a complete signature which - * is broadcasted by signer 0 to take B-coins. */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt(ctx, &partial_sig_b_adapted[0], &partial_sig_b[0], sec_adaptor, combined_nonce_parity_b)); - memcpy(&partial_sig_b_adapted[1], &partial_sig_b[1], sizeof(partial_sig_b_adapted[1])); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(ctx, &musig_session_b[0], final_sig_b, partial_sig_b_adapted, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, final_sig_b, msg32_b, &combined_pk_b) == 1); - - /* Step 6: Signer 1 extracts adaptor from the published signature, applies it to - * other partial signature, and takes A-coins. 
*/ - CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_secret_adaptor(ctx, sec_adaptor_extracted, final_sig_b, partial_sig_b, 2, combined_nonce_parity_b) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &partial_sig_a[0], &pubnonce_a[0], &pk_a[0], &keyagg_cache_a, &session_a) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &partial_sig_b[0], &pubnonce_b[0], &pk_b[0], &keyagg_cache_b, &session_b) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig_b[1], &secnonce_b[1], &keypair_b[1], &keyagg_cache_b, &session_b)); + + /* Step 5: Signer 0 aggregates its own partial signature with the partial + * signature from signer 1 and adapts it. This results in a complete + * signature which is broadcasted by signer 0 to take B-coins. */ + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(ctx, pre_sig_b, &session_b, partial_sig_b_ptr, 2) == 1); + memcpy(final_sig_b, pre_sig_b, sizeof(final_sig_b)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_adapt(ctx, final_sig_b, sec_adaptor, nonce_parity_b)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, final_sig_b, msg32_b, sizeof(msg32_b), &agg_pk_b) == 1); + + /* Step 6: Signer 1 signs, extracts adaptor from the published signature, + * and adapts the signature to take A-coins. 
*/ + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig_a[1], &secnonce_a[1], &keypair_a[1], &keyagg_cache_a, &session_a)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(ctx, final_sig_a, &session_a, partial_sig_a_ptr, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_extract_adaptor(ctx, sec_adaptor_extracted, final_sig_b, pre_sig_b, nonce_parity_b) == 1); CHECK(memcmp(sec_adaptor_extracted, sec_adaptor, sizeof(sec_adaptor)) == 0); /* in real life we couldn't check this, of course */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_adapt(ctx, &partial_sig_a[0], &partial_sig_a[0], sec_adaptor_extracted, combined_nonce_parity_a)); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &musig_session_a[1], &partial_sig_a[1])); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(ctx, &musig_session_a[1], final_sig_a, partial_sig_a, 2) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, final_sig_a, msg32_a, &combined_pk_a) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_adapt(ctx, final_sig_a, sec_adaptor_extracted, nonce_parity_a)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, final_sig_a, msg32_a, sizeof(msg32_a), &agg_pk_a) == 1); } -/* Checks that hash initialized by rustsecp256k1zkp_v0_4_0_musig_sha256_init_tagged has the - * expected state. 
*/ -void sha256_tag_test(void) { - char tag[17] = "MuSig coefficient"; +void sha256_tag_test_internal(rustsecp256k1zkp_v0_4_0_sha256 *sha_tagged, unsigned char *tag, size_t taglen) { rustsecp256k1zkp_v0_4_0_sha256 sha; - rustsecp256k1zkp_v0_4_0_sha256 sha_tagged; unsigned char buf[32]; unsigned char buf2[32]; size_t i; rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, (unsigned char *) tag, 17); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, tag, taglen); rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, buf); - /* buf = SHA256("MuSig coefficient") */ + /* buf = SHA256("KeyAgg coefficient") */ rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); rustsecp256k1zkp_v0_4_0_sha256_write(&sha, buf, 32); @@ -848,96 +727,114 @@ void sha256_tag_test(void) { CHECK((sha.bytes & 0x3F) == 0); /* Compare with tagged SHA */ - rustsecp256k1zkp_v0_4_0_musig_sha256_init_tagged(&sha_tagged); for (i = 0; i < 8; i++) { - CHECK(sha_tagged.s[i] == sha.s[i]); + CHECK(sha_tagged->s[i] == sha.s[i]); } rustsecp256k1zkp_v0_4_0_sha256_write(&sha, buf, 32); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha_tagged, buf, 32); + rustsecp256k1zkp_v0_4_0_sha256_write(sha_tagged, buf, 32); rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, buf); - rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha_tagged, buf2); + rustsecp256k1zkp_v0_4_0_sha256_finalize(sha_tagged, buf2); CHECK(memcmp(buf, buf2, 32) == 0); } -/* Attempts to create a signature for the combined public key using given secret - * keys and pre_session. */ -void musig_tweak_test_helper(const rustsecp256k1zkp_v0_4_0_xonly_pubkey* combined_pubkey, const unsigned char *sk0, const unsigned char *sk1, rustsecp256k1zkp_v0_4_0_musig_pre_session *pre_session) { - rustsecp256k1zkp_v0_4_0_musig_session session[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signers0[2]; - rustsecp256k1zkp_v0_4_0_musig_session_signer_data signers1[2]; +/* Checks that the initialized tagged hashes initialized have the expected + * state. 
*/ +void sha256_tag_test(void) { + rustsecp256k1zkp_v0_4_0_sha256 sha_tagged; + { + char tag[11] = "KeyAgg list"; + rustsecp256k1zkp_v0_4_0_musig_keyagglist_sha256(&sha_tagged); + sha256_tag_test_internal(&sha_tagged, (unsigned char*)tag, sizeof(tag)); + } + { + char tag[18] = "KeyAgg coefficient"; + rustsecp256k1zkp_v0_4_0_musig_keyaggcoef_sha256(&sha_tagged); + sha256_tag_test_internal(&sha_tagged, (unsigned char*)tag, sizeof(tag)); + } +} + +/* Attempts to create a signature for the aggregate public key using given secret + * keys and keyagg_cache. */ +void musig_tweak_test_helper(const rustsecp256k1zkp_v0_4_0_xonly_pubkey* agg_pk, const unsigned char *sk0, const unsigned char *sk1, rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache) { rustsecp256k1zkp_v0_4_0_xonly_pubkey pk[2]; unsigned char session_id[2][32]; unsigned char msg[32]; - unsigned char nonce_commitment[2][32]; - unsigned char nonce[2][32]; - const unsigned char *ncs[2]; - rustsecp256k1zkp_v0_4_0_musig_partial_signature partial_sig[2]; + rustsecp256k1zkp_v0_4_0_musig_secnonce secnonce[2]; + rustsecp256k1zkp_v0_4_0_musig_pubnonce pubnonce[2]; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce_ptr[2]; + rustsecp256k1zkp_v0_4_0_musig_aggnonce aggnonce; + rustsecp256k1zkp_v0_4_0_keypair keypair[2]; + rustsecp256k1zkp_v0_4_0_musig_session session; + rustsecp256k1zkp_v0_4_0_musig_partial_sig partial_sig[2]; + const rustsecp256k1zkp_v0_4_0_musig_partial_sig *partial_sig_ptr[2]; unsigned char final_sig[64]; + int i; - rustsecp256k1zkp_v0_4_0_testrand256(session_id[0]); - rustsecp256k1zkp_v0_4_0_testrand256(session_id[1]); + for (i = 0; i < 2; i++) { + pubnonce_ptr[i] = &pubnonce[i]; + partial_sig_ptr[i] = &partial_sig[i]; + + rustsecp256k1zkp_v0_4_0_testrand256(session_id[i]); + } + CHECK(create_keypair_and_pk(&keypair[0], &pk[0], sk0) == 1); + CHECK(create_keypair_and_pk(&keypair[1], &pk[1], sk1) == 1); rustsecp256k1zkp_v0_4_0_testrand256(msg); - 
CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[0], sk0) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[1], sk1) == 1); - - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session[0], signers0, nonce_commitment[0], session_id[0], msg, combined_pubkey, pre_session, 2, 0, sk0) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_init(ctx, &session[1], signers1, nonce_commitment[1], session_id[1], msg, combined_pubkey, pre_session, 2, 1, sk1) == 1); - /* Set nonce commitments */ - ncs[0] = nonce_commitment[0]; - ncs[1] = nonce_commitment[1]; - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session[0], signers0, nonce[0], ncs, 2, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_get_public_nonce(ctx, &session[1], signers1, nonce[1], ncs, 2, NULL) == 1); - /* Set nonces */ - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers0[0], nonce[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers0[1], nonce[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers1[0], nonce[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_set_nonce(ctx, &signers1[1], nonce[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session[0], signers0, 2, NULL, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_session_combine_nonces(ctx, &session[1], signers1, 2, NULL, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &session[0], &partial_sig[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &session[1], &partial_sig[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &session[0], &signers0[1], &partial_sig[1], &pk[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &session[1], &signers1[0], &partial_sig[0], &pk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_combine(ctx, &session[0], final_sig, partial_sig, 2)); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, final_sig, msg, 
combined_pubkey) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &secnonce[0], &pubnonce[0], session_id[0], sk0, NULL, NULL, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &secnonce[1], &pubnonce[1], session_id[1], sk1, NULL, NULL, NULL) == 1); + + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(ctx, &aggnonce, pubnonce_ptr, 2)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(ctx, &session, &aggnonce, msg, keyagg_cache, NULL) == 1); + + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig[0], &secnonce[0], &keypair[0], keyagg_cache, &session) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig[1], &secnonce[1], &keypair[1], keyagg_cache, &session) == 1); + + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &partial_sig[0], &pubnonce[0], &pk[0], keyagg_cache, &session) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify(ctx, &partial_sig[1], &pubnonce[1], &pk[1], keyagg_cache, &session) == 1); + + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg(ctx, final_sig, &session, partial_sig_ptr, 2)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, final_sig, msg, sizeof(msg), agg_pk) == 1); } -/* In this test we create a combined public key P and a commitment Q = P + +/* In this test we create a aggregate public key P and a commitment Q = P + * hash(P, contract)*G. Then we test that we can sign for both public keys. In - * order to sign for Q we use the tweak32 argument of partial_sig_combine. */ + * order to sign for Q we use the tweak32 argument of partial_sig_agg. 
*/ void musig_tweak_test(rustsecp256k1zkp_v0_4_0_scratch_space *scratch) { unsigned char sk[2][32]; rustsecp256k1zkp_v0_4_0_xonly_pubkey pk[2]; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session_P; - rustsecp256k1zkp_v0_4_0_musig_pre_session pre_session_Q; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk_ptr[2]; + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache keyagg_cache_P; + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache keyagg_cache_Q; rustsecp256k1zkp_v0_4_0_xonly_pubkey P; unsigned char P_serialized[32]; rustsecp256k1zkp_v0_4_0_pubkey Q; int Q_parity; rustsecp256k1zkp_v0_4_0_xonly_pubkey Q_xonly; unsigned char Q_serialized[32]; - rustsecp256k1zkp_v0_4_0_sha256 sha; unsigned char contract[32]; unsigned char ec_commit_tweak[32]; + int i; /* Setup */ - rustsecp256k1zkp_v0_4_0_testrand256(sk[0]); - rustsecp256k1zkp_v0_4_0_testrand256(sk[1]); + + for (i = 0; i < 2; i++) { + pk_ptr[i] = &pk[i]; + + rustsecp256k1zkp_v0_4_0_testrand256(sk[i]); + CHECK(create_keypair_and_pk(NULL, &pk[i], sk[i]) == 1); + } rustsecp256k1zkp_v0_4_0_testrand256(contract); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[0], sk[0]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_create(&pk[1], sk[1]) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_combine(ctx, scratch, &P, &pre_session_P, pk, 2) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(ctx, scratch, &P, &keyagg_cache_P, pk_ptr, 2) == 1); CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, P_serialized, &P) == 1); rustsecp256k1zkp_v0_4_0_sha256_initialize(&sha); rustsecp256k1zkp_v0_4_0_sha256_write(&sha, P_serialized, 32); rustsecp256k1zkp_v0_4_0_sha256_write(&sha, contract, 32); rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, ec_commit_tweak); - pre_session_Q = pre_session_P; - CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(ctx, &pre_session_Q, &Q, &P, ec_commit_tweak) == 1); + keyagg_cache_Q = keyagg_cache_P; + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add(ctx, &Q, ec_commit_tweak, 
&keyagg_cache_Q) == 1); CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_from_pubkey(ctx, &Q_xonly, &Q_parity, &Q)); CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, Q_serialized, &Q_xonly)); /* Check that musig_pubkey_tweak_add produces same result as @@ -945,9 +842,291 @@ void musig_tweak_test(rustsecp256k1zkp_v0_4_0_scratch_space *scratch) { CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_add_check(ctx, Q_serialized, Q_parity, &P, ec_commit_tweak) == 1); /* Test signing for P */ - musig_tweak_test_helper(&P, sk[0], sk[1], &pre_session_P); + musig_tweak_test_helper(&P, sk[0], sk[1], &keyagg_cache_P); /* Test signing for Q */ - musig_tweak_test_helper(&Q_xonly, sk[0], sk[1], &pre_session_Q); + musig_tweak_test_helper(&Q_xonly, sk[0], sk[1], &keyagg_cache_Q); +} + +void musig_test_vectors_keyagg_helper(const unsigned char **pk_ser, int n_pks, const unsigned char *agg_pk_expected, int has_second_pk, int second_pk_idx) { + rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk = malloc(n_pks * sizeof(*pk)); + const rustsecp256k1zkp_v0_4_0_xonly_pubkey **pk_ptr = malloc(n_pks * sizeof(*pk_ptr)); + rustsecp256k1zkp_v0_4_0_keyagg_cache_internal cache_i; + rustsecp256k1zkp_v0_4_0_xonly_pubkey agg_pk; + unsigned char agg_pk_ser[32]; + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache keyagg_cache; + int i; + + for (i = 0; i < n_pks; i++) { + CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(ctx, &pk[i], pk_ser[i])); + pk_ptr[i] = &pk[i]; + } + + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(ctx, NULL, &agg_pk, &keyagg_cache, pk_ptr, n_pks) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_keyagg_cache_load(ctx, &cache_i, &keyagg_cache)); + CHECK(rustsecp256k1zkp_v0_4_0_fe_is_zero(&cache_i.second_pk_x) == !has_second_pk); + if (!rustsecp256k1zkp_v0_4_0_fe_is_zero(&cache_i.second_pk_x)) { + rustsecp256k1zkp_v0_4_0_ge pk_pt; + CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx, &pk_pt, &pk[second_pk_idx])); + CHECK(rustsecp256k1zkp_v0_4_0_fe_equal_var(&pk_pt.x, &cache_i.second_pk_x)); + } + 
CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, agg_pk_ser, &agg_pk)); + /* TODO: remove when test vectors are not expected to change anymore */ + /* int k, l; */ + /* printf("const unsigned char agg_pk_expected[32] = {\n"); */ + /* for (k = 0; k < 4; k++) { */ + /* printf(" "); */ + /* for (l = 0; l < 8; l++) { */ + /* printf("0x%02X, ", agg_pk_ser[k*8+l]); */ + /* } */ + /* printf("\n"); */ + /* } */ + /* printf("};\n"); */ + CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(agg_pk_ser, agg_pk_expected, sizeof(agg_pk_ser)) == 0); + free(pk); + free(pk_ptr); +} + +/* Test vector public keys */ +const unsigned char vec_pk[3][32] = { + /* X1 */ + { + 0xF9, 0x30, 0x8A, 0x01, 0x92, 0x58, 0xC3, 0x10, + 0x49, 0x34, 0x4F, 0x85, 0xF8, 0x9D, 0x52, 0x29, + 0xB5, 0x31, 0xC8, 0x45, 0x83, 0x6F, 0x99, 0xB0, + 0x86, 0x01, 0xF1, 0x13, 0xBC, 0xE0, 0x36, 0xF9 + }, + /* X2 */ + { + 0xDF, 0xF1, 0xD7, 0x7F, 0x2A, 0x67, 0x1C, 0x5F, + 0x36, 0x18, 0x37, 0x26, 0xDB, 0x23, 0x41, 0xBE, + 0x58, 0xFE, 0xAE, 0x1D, 0xA2, 0xDE, 0xCE, 0xD8, + 0x43, 0x24, 0x0F, 0x7B, 0x50, 0x2B, 0xA6, 0x59 + }, + /* X3 */ + { + 0x35, 0x90, 0xA9, 0x4E, 0x76, 0x8F, 0x8E, 0x18, + 0x15, 0xC2, 0xF2, 0x4B, 0x4D, 0x80, 0xA8, 0xE3, + 0x14, 0x93, 0x16, 0xC3, 0x51, 0x8C, 0xE7, 0xB7, + 0xAD, 0x33, 0x83, 0x68, 0xD0, 0x38, 0xCA, 0x66 + } +}; + +void musig_test_vectors_keyagg(void) { + size_t i; + const unsigned char *pk[4]; + const unsigned char agg_pk_expected[4][32] = { + { /* 0 */ + 0xE5, 0x83, 0x01, 0x40, 0x51, 0x21, 0x95, 0xD7, + 0x4C, 0x83, 0x07, 0xE3, 0x96, 0x37, 0xCB, 0xE5, + 0xFB, 0x73, 0x0E, 0xBE, 0xAB, 0x80, 0xEC, 0x51, + 0x4C, 0xF8, 0x8A, 0x87, 0x7C, 0xEE, 0xEE, 0x0B, + }, + { /* 1 */ + 0xD7, 0x0C, 0xD6, 0x9A, 0x26, 0x47, 0xF7, 0x39, + 0x09, 0x73, 0xDF, 0x48, 0xCB, 0xFA, 0x2C, 0xCC, + 0x40, 0x7B, 0x8B, 0x2D, 0x60, 0xB0, 0x8C, 0x5F, + 0x16, 0x41, 0x18, 0x5C, 0x79, 0x98, 0xA2, 0x90, + }, + { /* 2 */ + 0x81, 0xA8, 0xB0, 0x93, 0x91, 0x2C, 0x9E, 0x48, + 0x14, 0x08, 0xD0, 0x97, 0x76, 0xCE, 0xFB, 0x48, + 0xAE, 0xB8, 
0xB6, 0x54, 0x81, 0xB6, 0xBA, 0xAF, + 0xB3, 0xC5, 0x81, 0x01, 0x06, 0x71, 0x7B, 0xEB, + }, + { /* 3 */ + 0x2E, 0xB1, 0x88, 0x51, 0x88, 0x7E, 0x7B, 0xDC, + 0x5E, 0x83, 0x0E, 0x89, 0xB1, 0x9D, 0xDB, 0xC2, + 0x80, 0x78, 0xF1, 0xFA, 0x88, 0xAA, 0xD0, 0xAD, + 0x01, 0xCA, 0x06, 0xFE, 0x4F, 0x80, 0x21, 0x0B, + }, + }; + + for (i = 0; i < sizeof(agg_pk_expected)/sizeof(agg_pk_expected[0]); i++) { + size_t n_pks; + int has_second_pk; + int second_pk_idx; + switch (i) { + case 0: + /* [X1, X2, X3] */ + n_pks = 3; + pk[0] = vec_pk[0]; + pk[1] = vec_pk[1]; + pk[2] = vec_pk[2]; + has_second_pk = 1; + second_pk_idx = 1; + break; + case 1: + /* [X3, X2, X1] */ + n_pks = 3; + pk[2] = vec_pk[0]; + pk[1] = vec_pk[1]; + pk[0] = vec_pk[2]; + has_second_pk = 1; + second_pk_idx = 1; + break; + case 2: + /* [X1, X1, X1] */ + n_pks = 3; + pk[0] = vec_pk[0]; + pk[1] = vec_pk[0]; + pk[2] = vec_pk[0]; + has_second_pk = 0; + second_pk_idx = 0; /* unchecked */ + break; + case 3: + /* [X1, X1, X2, X2] */ + n_pks = 4; + pk[0] = vec_pk[0]; + pk[1] = vec_pk[0]; + pk[2] = vec_pk[1]; + pk[3] = vec_pk[1]; + has_second_pk = 1; + second_pk_idx = 2; /* second_pk_idx = 3 is equally valid */ + break; + default: + CHECK(0); + } + musig_test_vectors_keyagg_helper(pk, n_pks, agg_pk_expected[i], has_second_pk, second_pk_idx); + } +} +void musig_test_vectors_sign_helper(rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, int *fin_nonce_parity, unsigned char *sig, const unsigned char state[2][32], const unsigned char *agg_pubnonce_ser, const unsigned char *sk, const unsigned char *msg, const unsigned char **pk_ser, int signer_pos) { + rustsecp256k1zkp_v0_4_0_keypair signer_keypair; + rustsecp256k1zkp_v0_4_0_musig_secnonce secnonce; + rustsecp256k1zkp_v0_4_0_xonly_pubkey pk[3]; + const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk_ptr[3]; + rustsecp256k1zkp_v0_4_0_xonly_pubkey agg_pk; + rustsecp256k1zkp_v0_4_0_musig_session session; + rustsecp256k1zkp_v0_4_0_musig_aggnonce agg_pubnonce; + 
rustsecp256k1zkp_v0_4_0_musig_partial_sig partial_sig; + int i; + + CHECK(create_keypair_and_pk(&signer_keypair, &pk[signer_pos], sk)); + for (i = 0; i < 3; i++) { + if (i != signer_pos) { + int offset = i < signer_pos ? 0 : -1; + CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(ctx, &pk[i], pk_ser[i + offset])); + } + pk_ptr[i] = &pk[i]; + } + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(ctx, NULL, &agg_pk, keyagg_cache, pk_ptr, 3) == 1); + memcpy(&secnonce.data[0], rustsecp256k1zkp_v0_4_0_musig_secnonce_magic, 4); + memcpy(&secnonce.data[4], state, sizeof(secnonce.data) - 4); + CHECK(rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse(ctx, &agg_pubnonce, agg_pubnonce_ser) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(ctx, &session, &agg_pubnonce, msg, keyagg_cache, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig, &secnonce, &signer_keypair, keyagg_cache, &session) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_parity(ctx, fin_nonce_parity, &session)); + memcpy(sig, &partial_sig.data[4], 32); +} + +int musig_test_pk_parity(const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache) { + rustsecp256k1zkp_v0_4_0_keyagg_cache_internal cache_i; + CHECK(rustsecp256k1zkp_v0_4_0_keyagg_cache_load(ctx, &cache_i, keyagg_cache)); + return rustsecp256k1zkp_v0_4_0_fe_is_odd(&cache_i.pk.y); +} + +int musig_test_is_second_pk(const rustsecp256k1zkp_v0_4_0_musig_keyagg_cache *keyagg_cache, const unsigned char *sk) { + rustsecp256k1zkp_v0_4_0_ge pkp; + rustsecp256k1zkp_v0_4_0_xonly_pubkey pk; + rustsecp256k1zkp_v0_4_0_keyagg_cache_internal cache_i; + CHECK(create_keypair_and_pk(NULL, &pk, sk)); + CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_load(ctx, &pkp, &pk)); + CHECK(rustsecp256k1zkp_v0_4_0_keyagg_cache_load(ctx, &cache_i, keyagg_cache)); + return rustsecp256k1zkp_v0_4_0_fe_equal_var(&cache_i.second_pk_x, &pkp.x); +} + +/* TODO: Add test vectors for failed signing */ +void musig_test_vectors_sign(void) { + unsigned char sig[32]; + 
rustsecp256k1zkp_v0_4_0_musig_keyagg_cache keyagg_cache; + int fin_nonce_parity; + /* The state corresponds to the two scalars that constitute the secret + * nonce. */ + const unsigned char state[2][32] = { + { + 0x50, 0x8B, 0x81, 0xA6, 0x11, 0xF1, 0x00, 0xA6, + 0xB2, 0xB6, 0xB2, 0x96, 0x56, 0x59, 0x08, 0x98, + 0xAF, 0x48, 0x8B, 0xCF, 0x2E, 0x1F, 0x55, 0xCF, + 0x22, 0xE5, 0xCF, 0xB8, 0x44, 0x21, 0xFE, 0x61, + }, + { + 0xFA, 0x27, 0xFD, 0x49, 0xB1, 0xD5, 0x00, 0x85, + 0xB4, 0x81, 0x28, 0x5E, 0x1C, 0xA2, 0x05, 0xD5, + 0x5C, 0x82, 0xCC, 0x1B, 0x31, 0xFF, 0x5C, 0xD5, + 0x4A, 0x48, 0x98, 0x29, 0x35, 0x59, 0x01, 0xF7, + } + }; + /* The nonces are already aggregated */ + const unsigned char agg_pubnonce[66] = { + 0x02, + 0x84, 0x65, 0xFC, 0xF0, 0xBB, 0xDB, 0xCF, 0x44, + 0x3A, 0xAB, 0xCC, 0xE5, 0x33, 0xD4, 0x2B, 0x4B, + 0x5A, 0x10, 0x96, 0x6A, 0xC0, 0x9A, 0x49, 0x65, + 0x5E, 0x8C, 0x42, 0xDA, 0xAB, 0x8F, 0xCD, 0x61, + 0x03, + 0x74, 0x96, 0xA3, 0xCC, 0x86, 0x92, 0x6D, 0x45, + 0x2C, 0xAF, 0xCF, 0xD5, 0x5D, 0x25, 0x97, 0x2C, + 0xA1, 0x67, 0x5D, 0x54, 0x93, 0x10, 0xDE, 0x29, + 0x6B, 0xFF, 0x42, 0xF7, 0x2E, 0xEE, 0xA8, 0xC9, + }; + const unsigned char sk[32] = { + 0x7F, 0xB9, 0xE0, 0xE6, 0x87, 0xAD, 0xA1, 0xEE, + 0xBF, 0x7E, 0xCF, 0xE2, 0xF2, 0x1E, 0x73, 0xEB, + 0xDB, 0x51, 0xA7, 0xD4, 0x50, 0x94, 0x8D, 0xFE, + 0x8D, 0x76, 0xD7, 0xF2, 0xD1, 0x00, 0x76, 0x71, + }; + const unsigned char msg[32] = { + 0xF7, 0x54, 0x66, 0xD0, 0x86, 0x77, 0x0E, 0x68, + 0x99, 0x64, 0x66, 0x42, 0x19, 0x26, 0x6F, 0xE5, + 0xED, 0x21, 0x5C, 0x92, 0xAE, 0x20, 0xBA, 0xB5, + 0xC9, 0xD7, 0x9A, 0xDD, 0xDD, 0xF3, 0xC0, 0xCF, + }; + const unsigned char *pk[2] = { vec_pk[0], vec_pk[1] }; + + { + const unsigned char sig_expected[32] = { + 0x00, 0xB6, 0x9D, 0x89, 0xCD, 0x3A, 0x54, 0xF3, + 0x9F, 0x2D, 0x2D, 0xDC, 0x5B, 0xE1, 0x90, 0x5E, + 0x08, 0xD2, 0x9E, 0x26, 0x6A, 0xD3, 0xA0, 0x59, + 0x92, 0x05, 0xF9, 0xF7, 0x91, 0x45, 0xDC, 0xF9, + }; + musig_test_vectors_sign_helper(&keyagg_cache, &fin_nonce_parity, sig, 
state, agg_pubnonce, sk, msg, pk, 0); + /* TODO: remove when test vectors are not expected to change anymore */ + /* int k, l; */ + /* printf("const unsigned char sig_expected[32] = {\n"); */ + /* for (k = 0; k < 4; k++) { */ + /* printf(" "); */ + /* for (l = 0; l < 8; l++) { */ + /* printf("0x%02X, ", sig[k*8+l]); */ + /* } */ + /* printf("\n"); */ + /* } */ + /* printf("};\n"); */ + + /* This is a test where the combined public key point has an _odd_ y + * coordinate, the signer _is not_ the second pubkey in the list and the + * nonce parity is 1. */ + CHECK(musig_test_pk_parity(&keyagg_cache) == 1); + CHECK(!musig_test_is_second_pk(&keyagg_cache, sk)); + CHECK(fin_nonce_parity == 1); + CHECK(memcmp(sig, sig_expected, 32) == 0); + } + { + const unsigned char sig_expected[32] = { + 0x7C, 0x45, 0xDD, 0xB6, 0x7D, 0x3D, 0x7C, 0x3D, + 0xE8, 0x82, 0x22, 0xFC, 0xF6, 0x62, 0x0D, 0xCE, + 0xBE, 0x92, 0x3D, 0x3B, 0x02, 0xF0, 0xAE, 0xC4, + 0x66, 0xEC, 0xBC, 0xA3, 0x01, 0x3A, 0x7C, 0xCB, + }; + musig_test_vectors_sign_helper(&keyagg_cache, &fin_nonce_parity, sig, state, agg_pubnonce, sk, msg, pk, 1); + + /* This is a test where the aggregate public key point has an _even_ y + * coordinate, the signer _is_ the second pubkey in the list and the + * nonce parity is 0. 
*/ + CHECK(musig_test_pk_parity(&keyagg_cache) == 0); + CHECK(musig_test_is_second_pk(&keyagg_cache, sk)); + CHECK(fin_nonce_parity == 0); + CHECK(memcmp(sig, sig_expected, 32) == 0); + } } void run_musig_tests(void) { @@ -958,7 +1137,7 @@ void run_musig_tests(void) { musig_simple_test(scratch); } musig_api_tests(scratch); - musig_state_machine_tests(scratch); + musig_nonce_test(); for (i = 0; i < count; i++) { /* Run multiple times to ensure that pk and nonce have different y * parities */ @@ -966,6 +1145,8 @@ void run_musig_tests(void) { musig_tweak_test(scratch); } sha256_tag_test(); + musig_test_vectors_keyagg(); + musig_test_vectors_sign(); rustsecp256k1zkp_v0_4_0_scratch_space_destroy(ctx, scratch); } diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/rangeproof/main_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/rangeproof/main_impl.h index d85e5fc6..61d6569e 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/rangeproof/main_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/rangeproof/main_impl.h @@ -182,7 +182,15 @@ int rustsecp256k1zkp_v0_4_0_pedersen_blind_generator_blind_sum(const rustsecp256 } rustsecp256k1zkp_v0_4_0_scalar_set_int(&sum, 0); - for (i = 0; i < n_total; i++) { + + /* Here, n_total > 0. Thus the loop runs at least once. + Thus we may use a do-while loop, which checks the loop + condition only at the end. + + The do-while loop helps GCC prove that the loop runs at least + once and suppresses a -Wmaybe-uninitialized warning. 
*/ + i = 0; + do { int overflow = 0; rustsecp256k1zkp_v0_4_0_scalar addend; rustsecp256k1zkp_v0_4_0_scalar_set_u64(&addend, value[i]); /* s = v */ @@ -207,7 +215,9 @@ int rustsecp256k1zkp_v0_4_0_pedersen_blind_generator_blind_sum(const rustsecp256 rustsecp256k1zkp_v0_4_0_scalar_cond_negate(&addend, i < n_inputs); /* s is negated if it's an input */ rustsecp256k1zkp_v0_4_0_scalar_add(&sum, &sum, &addend); /* sum += s */ rustsecp256k1zkp_v0_4_0_scalar_clear(&addend); - } + + i++; + } while (i < n_total); /* Right now tmp has the last pedersen blinding factor. Subtract the sum from it. */ rustsecp256k1zkp_v0_4_0_scalar_negate(&sum, &sum); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/rangeproof/rangeproof_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/rangeproof/rangeproof_impl.h index 817bace2..b1896b10 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/rangeproof/rangeproof_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/rangeproof/rangeproof_impl.h @@ -369,7 +369,7 @@ SECP256K1_INLINE static int rustsecp256k1zkp_v0_4_0_rangeproof_rewind_inner(rust rustsecp256k1zkp_v0_4_0_scalar stmp; unsigned char prep[4096]; unsigned char tmp[32]; - uint64_t value; + uint64_t value = 0; size_t offset; size_t i; size_t j; diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/recovery/main_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/recovery/main_impl.h index f86465b6..3157ad74 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/recovery/main_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/recovery/main_impl.h @@ -7,7 +7,7 @@ #ifndef SECP256K1_MODULE_RECOVERY_MAIN_H #define SECP256K1_MODULE_RECOVERY_MAIN_H -#include "include/secp256k1_recovery.h" +#include "../../../include/secp256k1_recovery.h" static void rustsecp256k1zkp_v0_4_0_ecdsa_recoverable_signature_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_scalar* r, rustsecp256k1zkp_v0_4_0_scalar* s, int* recid, const 
rustsecp256k1zkp_v0_4_0_ecdsa_recoverable_signature* sig) { (void)ctx; diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h index 05855da9..e221aa51 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/recovery/tests_exhaustive_impl.h @@ -8,7 +8,7 @@ #define SECP256K1_MODULE_RECOVERY_EXHAUSTIVE_TESTS_H #include "src/modules/recovery/main_impl.h" -#include "include/secp256k1_recovery.h" +#include "../../../include/secp256k1_recovery.h" void test_exhaustive_recovery_sign(const rustsecp256k1zkp_v0_4_0_context *ctx, const rustsecp256k1zkp_v0_4_0_ge *group) { int i, j, k; diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h index 38f05225..2137979e 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/main_impl.h @@ -7,9 +7,9 @@ #ifndef SECP256K1_MODULE_SCHNORRSIG_MAIN_H #define SECP256K1_MODULE_SCHNORRSIG_MAIN_H -#include "include/secp256k1.h" -#include "include/secp256k1_schnorrsig.h" -#include "hash.h" +#include "../../../include/secp256k1.h" +#include "../../../include/secp256k1_schnorrsig.h" +#include "../../hash.h" /* Initializes SHA256 with fixed midstate. This midstate was computed by applying * SHA256 to SHA256("BIP0340/nonce")||SHA256("BIP0340/nonce"). */ @@ -43,16 +43,18 @@ static void rustsecp256k1zkp_v0_4_0_nonce_function_bip340_sha256_tagged_aux(rust sha->bytes = 64; } -/* algo16 argument for nonce_function_bip340 to derive the nonce exactly as stated in BIP-340 +/* algo argument for nonce_function_bip340 to derive the nonce exactly as stated in BIP-340 * by using the correct tagged hash function. 
*/ -static const unsigned char bip340_algo16[16] = "BIP0340/nonce\0\0\0"; +static const unsigned char bip340_algo[13] = "BIP0340/nonce"; -static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) { +static const unsigned char schnorrsig_extraparams_magic[4] = SECP256K1_SCHNORRSIG_EXTRAPARAMS_MAGIC; + +static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { rustsecp256k1zkp_v0_4_0_sha256 sha; unsigned char masked_key[32]; int i; - if (algo16 == NULL) { + if (algo == NULL) { return 0; } @@ -65,18 +67,14 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms } } - /* Tag the hash with algo16 which is important to avoid nonce reuse across + /* Tag the hash with algo which is important to avoid nonce reuse across * algorithms. If this nonce function is used in BIP-340 signing as defined * in the spec, an optimized tagging implementation is used. 
*/ - if (rustsecp256k1zkp_v0_4_0_memcmp_var(algo16, bip340_algo16, 16) == 0) { + if (algolen == sizeof(bip340_algo) + && rustsecp256k1zkp_v0_4_0_memcmp_var(algo, bip340_algo, algolen) == 0) { rustsecp256k1zkp_v0_4_0_nonce_function_bip340_sha256_tagged(&sha); } else { - int algo16_len = 16; - /* Remove terminating null bytes */ - while (algo16_len > 0 && !algo16[algo16_len - 1]) { - algo16_len--; - } - rustsecp256k1zkp_v0_4_0_sha256_initialize_tagged(&sha, algo16, algo16_len); + rustsecp256k1zkp_v0_4_0_sha256_initialize_tagged(&sha, algo, algolen); } /* Hash (masked-)key||pk||msg using the tagged hash as per the spec */ @@ -86,7 +84,7 @@ static int nonce_function_bip340(unsigned char *nonce32, const unsigned char *ms rustsecp256k1zkp_v0_4_0_sha256_write(&sha, key32, 32); } rustsecp256k1zkp_v0_4_0_sha256_write(&sha, xonly_pk32, 32); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, msg32, 32); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, msg, msglen); rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, nonce32); return 1; } @@ -108,23 +106,23 @@ static void rustsecp256k1zkp_v0_4_0_schnorrsig_sha256_tagged(rustsecp256k1zkp_v0 sha->bytes = 64; } -static void rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(rustsecp256k1zkp_v0_4_0_scalar* e, const unsigned char *r32, const unsigned char *msg32, const unsigned char *pubkey32) +static void rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(rustsecp256k1zkp_v0_4_0_scalar* e, const unsigned char *r32, const unsigned char *msg, size_t msglen, const unsigned char *pubkey32) { unsigned char buf[32]; rustsecp256k1zkp_v0_4_0_sha256 sha; - /* tagged hash(r.x, pk.x, msg32) */ + /* tagged hash(r.x, pk.x, msg) */ rustsecp256k1zkp_v0_4_0_schnorrsig_sha256_tagged(&sha); rustsecp256k1zkp_v0_4_0_sha256_write(&sha, r32, 32); rustsecp256k1zkp_v0_4_0_sha256_write(&sha, pubkey32, 32); - rustsecp256k1zkp_v0_4_0_sha256_write(&sha, msg32, 32); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, msg, msglen); rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, buf); /* Set 
scalar e to the challenge hash modulo the curve order as per * BIP340. */ rustsecp256k1zkp_v0_4_0_scalar_set_b32(e, buf, NULL); } -int rustsecp256k1zkp_v0_4_0_schnorrsig_sign(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1zkp_v0_4_0_keypair *keypair, rustsecp256k1zkp_v0_4_0_nonce_function_hardened noncefp, void *ndata) { +int rustsecp256k1zkp_v0_4_0_schnorrsig_sign_internal(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1zkp_v0_4_0_keypair *keypair, rustsecp256k1zkp_v0_4_0_nonce_function_hardened noncefp, void *ndata) { rustsecp256k1zkp_v0_4_0_scalar sk; rustsecp256k1zkp_v0_4_0_scalar e; rustsecp256k1zkp_v0_4_0_scalar k; @@ -139,7 +137,7 @@ int rustsecp256k1zkp_v0_4_0_schnorrsig_sign(const rustsecp256k1zkp_v0_4_0_contex VERIFY_CHECK(ctx != NULL); ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx)); ARG_CHECK(sig64 != NULL); - ARG_CHECK(msg32 != NULL); + ARG_CHECK(msg != NULL || msglen == 0); ARG_CHECK(keypair != NULL); if (noncefp == NULL) { @@ -156,7 +154,7 @@ int rustsecp256k1zkp_v0_4_0_schnorrsig_sign(const rustsecp256k1zkp_v0_4_0_contex rustsecp256k1zkp_v0_4_0_scalar_get_b32(seckey, &sk); rustsecp256k1zkp_v0_4_0_fe_get_b32(pk_buf, &pk.x); - ret &= !!noncefp(buf, msg32, seckey, pk_buf, bip340_algo16, ndata); + ret &= !!noncefp(buf, msg, msglen, seckey, pk_buf, bip340_algo, sizeof(bip340_algo), ndata); rustsecp256k1zkp_v0_4_0_scalar_set_b32(&k, buf, NULL); ret &= !rustsecp256k1zkp_v0_4_0_scalar_is_zero(&k); rustsecp256k1zkp_v0_4_0_scalar_cmov(&k, &rustsecp256k1zkp_v0_4_0_scalar_one, !ret); @@ -174,7 +172,7 @@ int rustsecp256k1zkp_v0_4_0_schnorrsig_sign(const rustsecp256k1zkp_v0_4_0_contex rustsecp256k1zkp_v0_4_0_fe_normalize_var(&r.x); rustsecp256k1zkp_v0_4_0_fe_get_b32(&sig64[0], &r.x); - rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(&e, &sig64[0], msg32, pk_buf); + 
rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(&e, &sig64[0], msg, msglen, pk_buf); rustsecp256k1zkp_v0_4_0_scalar_mul(&e, &e, &sk); rustsecp256k1zkp_v0_4_0_scalar_add(&e, &e, &k); rustsecp256k1zkp_v0_4_0_scalar_get_b32(&sig64[32], &e); @@ -187,7 +185,26 @@ int rustsecp256k1zkp_v0_4_0_schnorrsig_sign(const rustsecp256k1zkp_v0_4_0_contex return ret; } -int rustsecp256k1zkp_v0_4_0_schnorrsig_verify(const rustsecp256k1zkp_v0_4_0_context* ctx, const unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkey) { +int rustsecp256k1zkp_v0_4_0_schnorrsig_sign(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *sig64, const unsigned char *msg32, const rustsecp256k1zkp_v0_4_0_keypair *keypair, unsigned char *aux_rand32) { + return rustsecp256k1zkp_v0_4_0_schnorrsig_sign_internal(ctx, sig64, msg32, 32, keypair, rustsecp256k1zkp_v0_4_0_nonce_function_bip340, aux_rand32); +} + +int rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1zkp_v0_4_0_keypair *keypair, rustsecp256k1zkp_v0_4_0_schnorrsig_extraparams *extraparams) { + rustsecp256k1zkp_v0_4_0_nonce_function_hardened noncefp = NULL; + void *ndata = NULL; + VERIFY_CHECK(ctx != NULL); + + if (extraparams != NULL) { + ARG_CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(extraparams->magic, + schnorrsig_extraparams_magic, + sizeof(extraparams->magic)) == 0); + noncefp = extraparams->noncefp; + ndata = extraparams->ndata; + } + return rustsecp256k1zkp_v0_4_0_schnorrsig_sign_internal(ctx, sig64, msg, msglen, keypair, noncefp, ndata); +} + +int rustsecp256k1zkp_v0_4_0_schnorrsig_verify(const rustsecp256k1zkp_v0_4_0_context* ctx, const unsigned char *sig64, const unsigned char *msg, size_t msglen, const rustsecp256k1zkp_v0_4_0_xonly_pubkey *pubkey) { rustsecp256k1zkp_v0_4_0_scalar s; rustsecp256k1zkp_v0_4_0_scalar e; rustsecp256k1zkp_v0_4_0_gej rj; @@ -201,7 +218,7 @@ int 
rustsecp256k1zkp_v0_4_0_schnorrsig_verify(const rustsecp256k1zkp_v0_4_0_cont VERIFY_CHECK(ctx != NULL); ARG_CHECK(rustsecp256k1zkp_v0_4_0_ecmult_context_is_built(&ctx->ecmult_ctx)); ARG_CHECK(sig64 != NULL); - ARG_CHECK(msg32 != NULL); + ARG_CHECK(msg != NULL || msglen == 0); ARG_CHECK(pubkey != NULL); if (!rustsecp256k1zkp_v0_4_0_fe_set_b32(&rx, &sig64[0])) { @@ -219,7 +236,7 @@ int rustsecp256k1zkp_v0_4_0_schnorrsig_verify(const rustsecp256k1zkp_v0_4_0_cont /* Compute e. */ rustsecp256k1zkp_v0_4_0_fe_get_b32(buf, &pk.x); - rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(&e, &sig64[0], msg32, buf); + rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(&e, &sig64[0], msg, msglen, buf); /* Compute rj = s*G + (-e)*pkj */ rustsecp256k1zkp_v0_4_0_scalar_negate(&e, &e); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h index f1658e95..f91297ca 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h @@ -7,7 +7,7 @@ #ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H #define SECP256K1_MODULE_SCHNORRSIG_TESTS_EXHAUSTIVE_H -#include "include/secp256k1_schnorrsig.h" +#include "../../../include/secp256k1_schnorrsig.h" #include "src/modules/schnorrsig/main_impl.h" static const unsigned char invalid_pubkey_bytes[][32] = { @@ -58,15 +58,19 @@ static const unsigned char invalid_pubkey_bytes[][32] = { #define NUM_INVALID_KEYS (sizeof(invalid_pubkey_bytes) / sizeof(invalid_pubkey_bytes[0])) -static int rustsecp256k1zkp_v0_4_0_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32, +static int rustsecp256k1zkp_v0_4_0_hardened_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg, + size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, - const unsigned char *algo16, 
void* data) { + const unsigned char *algo, size_t algolen, + void* data) { rustsecp256k1zkp_v0_4_0_scalar s; int *idata = data; - (void)msg32; + (void)msg; + (void)msglen; (void)key32; (void)xonly_pk32; - (void)algo16; + (void)algo; + (void)algolen; rustsecp256k1zkp_v0_4_0_scalar_set_int(&s, *idata); rustsecp256k1zkp_v0_4_0_scalar_get_b32(nonce32, &s); return 1; @@ -101,7 +105,7 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1zkp_v0_4_0_cont rustsecp256k1zkp_v0_4_0_scalar e; unsigned char msg32[32]; rustsecp256k1zkp_v0_4_0_testrand256(msg32); - rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(&e, sig64, msg32, pk32); + rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(&e, sig64, msg32, sizeof(msg32), pk32); /* Only do work if we hit a challenge we haven't tried before. */ if (!e_done[e]) { /* Iterate over the possible valid last 32 bytes in the signature. @@ -119,7 +123,7 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1zkp_v0_4_0_cont rustsecp256k1zkp_v0_4_0_testrand256(sig64 + 32); expect_valid = 0; } - valid = rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig64, msg32, &pubkeys[d - 1]); + valid = rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig64, msg32, sizeof(msg32), &pubkeys[d - 1]); CHECK(valid == expect_valid); count_valid += valid; } @@ -137,6 +141,8 @@ static void test_exhaustive_schnorrsig_verify(const rustsecp256k1zkp_v0_4_0_cont static void test_exhaustive_schnorrsig_sign(const rustsecp256k1zkp_v0_4_0_context *ctx, unsigned char (*xonly_pubkey_bytes)[32], const rustsecp256k1zkp_v0_4_0_keypair* keypairs, const int* parities) { int d, k; uint64_t iter = 0; + rustsecp256k1zkp_v0_4_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + /* Loop over keys. 
*/ for (d = 1; d < EXHAUSTIVE_TEST_ORDER; ++d) { int actual_d = d; @@ -149,19 +155,21 @@ static void test_exhaustive_schnorrsig_sign(const rustsecp256k1zkp_v0_4_0_contex unsigned char sig64[64]; int actual_k = k; if (skip_section(&iter)) continue; + extraparams.noncefp = rustsecp256k1zkp_v0_4_0_hardened_nonce_function_smallint; + extraparams.ndata = &k; if (parities[k - 1]) actual_k = EXHAUSTIVE_TEST_ORDER - k; /* Generate random messages until all challenges have been tried. */ while (e_count_done < EXHAUSTIVE_TEST_ORDER) { rustsecp256k1zkp_v0_4_0_scalar e; rustsecp256k1zkp_v0_4_0_testrand256(msg32); - rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, xonly_pubkey_bytes[d - 1]); + rustsecp256k1zkp_v0_4_0_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, sizeof(msg32), xonly_pubkey_bytes[d - 1]); /* Only do work if we hit a challenge we haven't tried before. */ if (!e_done[e]) { rustsecp256k1zkp_v0_4_0_scalar expected_s = (actual_k + e * actual_d) % EXHAUSTIVE_TEST_ORDER; unsigned char expected_s_bytes[32]; rustsecp256k1zkp_v0_4_0_scalar_get_b32(expected_s_bytes, &expected_s); /* Invoke the real function to construct a signature. */ - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig64, msg32, &keypairs[d - 1], rustsecp256k1zkp_v0_4_0_hardened_nonce_function_smallint, &k)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(ctx, sig64, msg32, sizeof(msg32), &keypairs[d - 1], &extraparams)); /* The first 32 bytes must match the xonly pubkey for the specified k. */ CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(sig64, xonly_pubkey_bytes[k - 1], 32) == 0); /* The last 32 bytes must match the expected s value. 
*/ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h index 13afb2a0..7ec1cd47 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/modules/schnorrsig/tests_impl.h @@ -7,16 +7,16 @@ #ifndef SECP256K1_MODULE_SCHNORRSIG_TESTS_H #define SECP256K1_MODULE_SCHNORRSIG_TESTS_H -#include "secp256k1_schnorrsig.h" +#include "../../../include/secp256k1_schnorrsig.h" /* Checks that a bit flip in the n_flip-th argument (that has n_bytes many * bytes) changes the hash function */ -void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes) { +void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes, size_t msglen, size_t algolen) { unsigned char nonces[2][32]; - CHECK(nonce_function_bip340(nonces[0], args[0], args[1], args[2], args[3], args[4]) == 1); + CHECK(nonce_function_bip340(nonces[0], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1); rustsecp256k1zkp_v0_4_0_testrand_flip(args[n_flip], n_bytes); - CHECK(nonce_function_bip340(nonces[1], args[0], args[1], args[2], args[3], args[4]) == 1); + CHECK(nonce_function_bip340(nonces[1], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1); CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(nonces[0], nonces[1], 32) != 0); } @@ -34,11 +34,13 @@ void test_sha256_eq(const rustsecp256k1zkp_v0_4_0_sha256 *sha1, const rustsecp25 void run_nonce_function_bip340_tests(void) { unsigned char tag[13] = "BIP0340/nonce"; unsigned char aux_tag[11] = "BIP0340/aux"; - unsigned char algo16[16] = "BIP0340/nonce\0\0\0"; + unsigned char algo[13] = "BIP0340/nonce"; + size_t algolen = sizeof(algo); rustsecp256k1zkp_v0_4_0_sha256 sha; rustsecp256k1zkp_v0_4_0_sha256 sha_optimized; unsigned char nonce[32]; unsigned char msg[32]; + size_t msglen = sizeof(msg); unsigned char key[32]; unsigned char 
pk[32]; unsigned char aux_rand[32]; @@ -68,33 +70,45 @@ void run_nonce_function_bip340_tests(void) { args[0] = msg; args[1] = key; args[2] = pk; - args[3] = algo16; + args[3] = algo; args[4] = aux_rand; for (i = 0; i < count; i++) { - nonce_function_bip340_bitflip(args, 0, 32); - nonce_function_bip340_bitflip(args, 1, 32); - nonce_function_bip340_bitflip(args, 2, 32); - /* Flip algo16 special case "BIP0340/nonce" */ - nonce_function_bip340_bitflip(args, 3, 16); - /* Flip algo16 again */ - nonce_function_bip340_bitflip(args, 3, 16); - nonce_function_bip340_bitflip(args, 4, 32); + nonce_function_bip340_bitflip(args, 0, 32, msglen, algolen); + nonce_function_bip340_bitflip(args, 1, 32, msglen, algolen); + nonce_function_bip340_bitflip(args, 2, 32, msglen, algolen); + /* Flip algo special case "BIP0340/nonce" */ + nonce_function_bip340_bitflip(args, 3, algolen, msglen, algolen); + /* Flip algo again */ + nonce_function_bip340_bitflip(args, 3, algolen, msglen, algolen); + nonce_function_bip340_bitflip(args, 4, 32, msglen, algolen); } - /* NULL algo16 is disallowed */ - CHECK(nonce_function_bip340(nonce, msg, key, pk, NULL, NULL) == 0); - /* Empty algo16 is fine */ - memset(algo16, 0x00, 16); - CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1); - /* algo16 with terminating null bytes is fine */ - algo16[1] = 65; - CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1); - /* Other algo16 is fine */ - memset(algo16, 0xFF, 16); - CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1); + /* NULL algo is disallowed */ + CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, NULL, 0, NULL) == 0); + CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); + /* Other algo is fine */ + rustsecp256k1zkp_v0_4_0_rfc6979_hmac_sha256_generate(&rustsecp256k1zkp_v0_4_0_test_rng, algo, algolen); + CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); + + for (i = 0; i < count; i++) 
{ + unsigned char nonce2[32]; + uint32_t offset = rustsecp256k1zkp_v0_4_0_testrand_int(msglen - 1); + size_t msglen_tmp = (msglen + offset) % msglen; + size_t algolen_tmp; + + /* Different msglen gives different nonce */ + CHECK(nonce_function_bip340(nonce2, msg, msglen_tmp, key, pk, algo, algolen, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(nonce, nonce2, 32) != 0); + + /* Different algolen gives different nonce */ + offset = rustsecp256k1zkp_v0_4_0_testrand_int(algolen - 1); + algolen_tmp = (algolen + offset) % algolen; + CHECK(nonce_function_bip340(nonce2, msg, msglen, key, pk, algo, algolen_tmp, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(nonce, nonce2, 32) != 0); + } /* NULL aux_rand argument is allowed. */ - CHECK(nonce_function_bip340(nonce, msg, key, pk, algo16, NULL) == 1); + CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1); } void test_schnorrsig_api(void) { @@ -103,10 +117,12 @@ void test_schnorrsig_api(void) { unsigned char sk3[32]; unsigned char msg[32]; rustsecp256k1zkp_v0_4_0_keypair keypairs[3]; - rustsecp256k1zkp_v0_4_0_keypair invalid_keypair = { 0 }; + rustsecp256k1zkp_v0_4_0_keypair invalid_keypair = {{ 0 }}; rustsecp256k1zkp_v0_4_0_xonly_pubkey pk[3]; rustsecp256k1zkp_v0_4_0_xonly_pubkey zero_pk; unsigned char sig[64]; + rustsecp256k1zkp_v0_4_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + rustsecp256k1zkp_v0_4_0_schnorrsig_extraparams invalid_extraparams = {{ 0 }, NULL, NULL}; /** setup **/ rustsecp256k1zkp_v0_4_0_context *none = rustsecp256k1zkp_v0_4_0_context_create(SECP256K1_CONTEXT_NONE); @@ -138,36 +154,60 @@ void test_schnorrsig_api(void) { /** main test body **/ ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL, NULL) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(none, sig, msg, &keypairs[0], NULL) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(vrfy, sig, msg, 
&keypairs[0], NULL) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL) == 1); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL) == 0); + CHECK(ecount == 3); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL) == 0); + CHECK(ecount == 4); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, msg, NULL, NULL) == 0); + CHECK(ecount == 5); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL) == 0); + CHECK(ecount == 6); + + ecount = 0; + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(none, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(vrfy, sig, msg, &keypairs[0], NULL, NULL) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(vrfy, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], &extraparams) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, NULL, msg, &keypairs[0], NULL, NULL) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(sign, NULL, msg, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, NULL, &keypairs[0], NULL, NULL) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(sign, sig, NULL, sizeof(msg), &keypairs[0], &extraparams) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, msg, NULL, NULL, NULL) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(sign, sig, NULL, 0, &keypairs[0], &extraparams) == 1); + CHECK(ecount == 4); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(sign, 
sig, msg, sizeof(msg), NULL, &extraparams) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, msg, &invalid_keypair, NULL, NULL) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &invalid_keypair, &extraparams) == 0); + CHECK(ecount == 6); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], NULL) == 1); CHECK(ecount == 6); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(sign, sig, msg, sizeof(msg), &keypairs[0], &invalid_extraparams) == 0); + CHECK(ecount == 7); ecount = 0; - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(none, sig, msg, &pk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(sign, sig, msg, &keypairs[0], NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(none, sig, msg, sizeof(msg), &pk[0]) == 0); CHECK(ecount == 1); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(sign, sig, msg, &pk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(sign, sig, msg, sizeof(msg), &pk[0]) == 0); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, sig, msg, &pk[0]) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), &pk[0]) == 1); CHECK(ecount == 2); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, NULL, msg, &pk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, NULL, msg, sizeof(msg), &pk[0]) == 0); CHECK(ecount == 3); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, sig, NULL, &pk[0]) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, sig, NULL, sizeof(msg), &pk[0]) == 0); + CHECK(ecount == 4); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, sig, NULL, 0, &pk[0]) == 0); CHECK(ecount == 4); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, sig, msg, NULL) == 0); + 
CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), NULL) == 0); CHECK(ecount == 5); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, sig, msg, &zero_pk) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(vrfy, sig, msg, sizeof(msg), &zero_pk) == 0); CHECK(ecount == 6); rustsecp256k1zkp_v0_4_0_context_destroy(none); @@ -179,7 +219,7 @@ void test_schnorrsig_api(void) { /* Checks that hash initialized by rustsecp256k1zkp_v0_4_0_schnorrsig_sha256_tagged has the * expected state. */ void test_schnorrsig_sha256_tagged(void) { - char tag[17] = "BIP0340/challenge"; + unsigned char tag[17] = "BIP0340/challenge"; rustsecp256k1zkp_v0_4_0_sha256 sha; rustsecp256k1zkp_v0_4_0_sha256 sha_optimized; @@ -190,19 +230,19 @@ void test_schnorrsig_sha256_tagged(void) { /* Helper function for schnorrsig_bip_vectors * Signs the message and checks that it's the same as expected_sig. */ -void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, unsigned char *aux_rand, const unsigned char *msg, const unsigned char *expected_sig) { +void test_schnorrsig_bip_vectors_check_signing(const unsigned char *sk, const unsigned char *pk_serialized, unsigned char *aux_rand, const unsigned char *msg32, const unsigned char *expected_sig) { unsigned char sig[64]; rustsecp256k1zkp_v0_4_0_keypair keypair; rustsecp256k1zkp_v0_4_0_xonly_pubkey pk, pk_expected; CHECK(rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &keypair, sk)); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, aux_rand)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg32, &keypair, aux_rand)); CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(sig, expected_sig, 64) == 0); CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(ctx, &pk_expected, pk_serialized)); CHECK(rustsecp256k1zkp_v0_4_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(&pk, &pk_expected, sizeof(pk)) == 0); - 
CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg, &pk)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg32, 32, &pk)); } /* Helper function for schnorrsig_bip_vectors @@ -211,7 +251,7 @@ void test_schnorrsig_bip_vectors_check_verify(const unsigned char *pk_serialized rustsecp256k1zkp_v0_4_0_xonly_pubkey pk; CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(ctx, &pk, pk_serialized)); - CHECK(expected == rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg32, &pk)); + CHECK(expected == rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg32, 32, &pk)); } /* Test vectors according to BIP-340 ("Schnorr Signatures for secp256k1"). See @@ -634,22 +674,26 @@ void test_schnorrsig_bip_vectors(void) { } /* Nonce function that returns constant 0 */ -static int nonce_function_failing(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) { - (void) msg32; +static int nonce_function_failing(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { + (void) msg; + (void) msglen; (void) key32; (void) xonly_pk32; - (void) algo16; + (void) algo; + (void) algolen; (void) data; (void) nonce32; return 0; } /* Nonce function that sets nonce to 0 */ -static int nonce_function_0(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) { - (void) msg32; +static int nonce_function_0(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { + (void) msg; + (void) msglen; (void) key32; (void) xonly_pk32; - (void) algo16; + (void) algo; + (void) algolen; (void) data; memset(nonce32, 0, 32); @@ -657,11 +701,13 @@ static int 
nonce_function_0(unsigned char *nonce32, const unsigned char *msg32, } /* Nonce function that sets nonce to 0xFF...0xFF */ -static int nonce_function_overflowing(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo16, void *data) { - (void) msg32; +static int nonce_function_overflowing(unsigned char *nonce32, const unsigned char *msg, size_t msglen, const unsigned char *key32, const unsigned char *xonly_pk32, const unsigned char *algo, size_t algolen, void *data) { + (void) msg; + (void) msglen; (void) key32; (void) xonly_pk32; - (void) algo16; + (void) algo; + (void) algolen; (void) data; memset(nonce32, 0xFF, 32); @@ -670,24 +716,45 @@ static int nonce_function_overflowing(unsigned char *nonce32, const unsigned cha void test_schnorrsig_sign(void) { unsigned char sk[32]; + rustsecp256k1zkp_v0_4_0_xonly_pubkey pk; rustsecp256k1zkp_v0_4_0_keypair keypair; const unsigned char msg[32] = "this is a msg for a schnorrsig.."; unsigned char sig[64]; + unsigned char sig2[64]; unsigned char zeros64[64] = { 0 }; + rustsecp256k1zkp_v0_4_0_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT; + unsigned char aux_rand[32]; rustsecp256k1zkp_v0_4_0_testrand256(sk); + rustsecp256k1zkp_v0_4_0_testrand256(aux_rand); CHECK(rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &keypair, sk)); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); /* Test different nonce functions */ + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); memset(sig, 1, sizeof(sig)); - 
CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_failing, NULL) == 0); + extraparams.noncefp = nonce_function_failing; + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0); memset(&sig, 1, sizeof(sig)); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_0, NULL) == 0); + extraparams.noncefp = nonce_function_0; + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 0); CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(sig, zeros64, sizeof(sig)) == 0); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, nonce_function_overflowing, NULL) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(sig, zeros64, sizeof(sig)) != 0); + memset(&sig, 1, sizeof(sig)); + extraparams.noncefp = nonce_function_overflowing; + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &pk)); + + /* When using the default nonce function, schnorrsig_sign_custom produces + * the same result as schnorrsig_sign with aux_rand = extraparams.ndata */ + extraparams.noncefp = NULL; + extraparams.ndata = aux_rand; + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(ctx, sig, msg, sizeof(msg), &keypair, &extraparams) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig2, msg, &keypair, extraparams.ndata) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(sig, sig2, sizeof(sig)) == 0); } #define N_SIGS 3 @@ -709,8 +776,8 @@ void test_schnorrsig_sign_verify(void) { for (i = 0; i < N_SIGS; i++) { rustsecp256k1zkp_v0_4_0_testrand256(msg[i]); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL, NULL)); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[i], 
msg[i], &pk)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig[i], msg[i], &keypair, NULL)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[i], msg[i], sizeof(msg[i]), &pk)); } { @@ -720,36 +787,54 @@ void test_schnorrsig_sign_verify(void) { size_t byte_idx = rustsecp256k1zkp_v0_4_0_testrand_int(32); unsigned char xorbyte = rustsecp256k1zkp_v0_4_0_testrand_int(254)+1; sig[sig_idx][byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); sig[sig_idx][byte_idx] ^= xorbyte; byte_idx = rustsecp256k1zkp_v0_4_0_testrand_int(32); sig[sig_idx][32+byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); sig[sig_idx][32+byte_idx] ^= xorbyte; byte_idx = rustsecp256k1zkp_v0_4_0_testrand_int(32); msg[sig_idx][byte_idx] ^= xorbyte; - CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); msg[sig_idx][byte_idx] ^= xorbyte; /* Check that above bitflips have been reversed correctly */ - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], &pk)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk)); } /* Test overflowing s */ - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL)); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); memset(&sig[0][32], 0xFF, 32); 
- CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); /* Test negative s */ - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL, NULL)); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig[0], msg[0], &keypair, NULL)); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); rustsecp256k1zkp_v0_4_0_scalar_set_b32(&s, &sig[0][32], NULL); rustsecp256k1zkp_v0_4_0_scalar_negate(&s, &s); rustsecp256k1zkp_v0_4_0_scalar_get_b32(&sig[0][32], &s); - CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], &pk)); + CHECK(!rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg[0], sizeof(msg[0]), &pk)); + + /* The empty message can be signed & verified */ + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(ctx, sig[0], NULL, 0, &keypair, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], NULL, 0, &pk) == 1); + + { + /* Test varying message lengths */ + unsigned char msg_large[32 * 8]; + uint32_t msglen = rustsecp256k1zkp_v0_4_0_testrand_int(sizeof(msg_large)); + for (i = 0; i < sizeof(msg_large); i += 32) { + rustsecp256k1zkp_v0_4_0_testrand256(&msg_large[i]); + } + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign_custom(ctx, sig[0], msg_large, msglen, &keypair, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 1); + /* Verification for a random wrong message length fails */ + msglen = (msglen + (sizeof(msg_large) - 1)) % sizeof(msg_large); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig[0], msg_large, msglen, &pk) == 0); + } } #undef N_SIGS @@ -777,10 +862,10 @@ void test_schnorrsig_taproot(void) { /* Key spend */ rustsecp256k1zkp_v0_4_0_testrand256(msg); - 
CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL) == 1); /* Verify key spend */ CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_parse(ctx, &output_pk, output_pk_bytes) == 1); - CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg, &output_pk) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_schnorrsig_verify(ctx, sig, msg, sizeof(msg), &output_pk) == 1); /* Script spend */ CHECK(rustsecp256k1zkp_v0_4_0_xonly_pubkey_serialize(ctx, internal_pk_bytes, &internal_pk) == 1); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/num.h b/secp256k1-zkp-sys/depend/secp256k1/src/num.h deleted file mode 100644 index 67e3dc6e..00000000 --- a/secp256k1-zkp-sys/depend/secp256k1/src/num.h +++ /dev/null @@ -1,74 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -#ifndef SECP256K1_NUM_H -#define SECP256K1_NUM_H - -#ifndef USE_NUM_NONE - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#if defined(USE_NUM_GMP) -#include "num_gmp.h" -#else -#error "Please select num implementation" -#endif - -/** Copy a number. */ -static void rustsecp256k1zkp_v0_4_0_num_copy(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a); - -/** Convert a number's absolute value to a binary big-endian string. - * There must be enough place. */ -static void rustsecp256k1zkp_v0_4_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1zkp_v0_4_0_num *a); - -/** Set a number to the value of a binary big-endian string. 
*/ -static void rustsecp256k1zkp_v0_4_0_num_set_bin(rustsecp256k1zkp_v0_4_0_num *r, const unsigned char *a, unsigned int alen); - -/** Compute a modular inverse. The input must be less than the modulus. */ -static void rustsecp256k1zkp_v0_4_0_num_mod_inverse(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *m); - -/** Compute the jacobi symbol (a|b). b must be positive and odd. */ -static int rustsecp256k1zkp_v0_4_0_num_jacobi(const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b); - -/** Compare the absolute value of two numbers. */ -static int rustsecp256k1zkp_v0_4_0_num_cmp(const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b); - -/** Test whether two number are equal (including sign). */ -static int rustsecp256k1zkp_v0_4_0_num_eq(const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b); - -/** Add two (signed) numbers. */ -static void rustsecp256k1zkp_v0_4_0_num_add(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b); - -/** Subtract two (signed) numbers. */ -static void rustsecp256k1zkp_v0_4_0_num_sub(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b); - -/** Multiply two (signed) numbers. */ -static void rustsecp256k1zkp_v0_4_0_num_mul(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b); - -/** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1, - even if r was negative. */ -static void rustsecp256k1zkp_v0_4_0_num_mod(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *m); - -/** Right-shift the passed number by bits bits. */ -static void rustsecp256k1zkp_v0_4_0_num_shift(rustsecp256k1zkp_v0_4_0_num *r, int bits); - -/** Check whether a number is zero. 
*/ -static int rustsecp256k1zkp_v0_4_0_num_is_zero(const rustsecp256k1zkp_v0_4_0_num *a); - -/** Check whether a number is one. */ -static int rustsecp256k1zkp_v0_4_0_num_is_one(const rustsecp256k1zkp_v0_4_0_num *a); - -/** Check whether a number is strictly negative. */ -static int rustsecp256k1zkp_v0_4_0_num_is_neg(const rustsecp256k1zkp_v0_4_0_num *a); - -/** Change a number's sign. */ -static void rustsecp256k1zkp_v0_4_0_num_negate(rustsecp256k1zkp_v0_4_0_num *r); - -#endif - -#endif /* SECP256K1_NUM_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/num_gmp.h b/secp256k1-zkp-sys/depend/secp256k1/src/num_gmp.h deleted file mode 100644 index 06dfb14f..00000000 --- a/secp256k1-zkp-sys/depend/secp256k1/src/num_gmp.h +++ /dev/null @@ -1,20 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -#ifndef SECP256K1_NUM_REPR_H -#define SECP256K1_NUM_REPR_H - -#include - -#define NUM_LIMBS ((256+GMP_NUMB_BITS-1)/GMP_NUMB_BITS) - -typedef struct { - mp_limb_t data[2*NUM_LIMBS]; - int neg; - int limbs; -} rustsecp256k1zkp_v0_4_0_num; - -#endif /* SECP256K1_NUM_REPR_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/num_gmp_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/num_gmp_impl.h deleted file mode 100644 index 01f84ed4..00000000 --- a/secp256k1-zkp-sys/depend/secp256k1/src/num_gmp_impl.h +++ /dev/null @@ -1,288 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -#ifndef 
SECP256K1_NUM_REPR_IMPL_H -#define SECP256K1_NUM_REPR_IMPL_H - -#include -#include -#include - -#include "util.h" -#include "num.h" - -#ifdef VERIFY -static void rustsecp256k1zkp_v0_4_0_num_sanity(const rustsecp256k1zkp_v0_4_0_num *a) { - VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0)); -} -#else -#define rustsecp256k1zkp_v0_4_0_num_sanity(a) do { } while(0) -#endif - -static void rustsecp256k1zkp_v0_4_0_num_copy(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a) { - *r = *a; -} - -static void rustsecp256k1zkp_v0_4_0_num_get_bin(unsigned char *r, unsigned int rlen, const rustsecp256k1zkp_v0_4_0_num *a) { - unsigned char tmp[65]; - int len = 0; - int shift = 0; - if (a->limbs>1 || a->data[0] != 0) { - len = mpn_get_str(tmp, 256, (mp_limb_t*)a->data, a->limbs); - } - while (shift < len && tmp[shift] == 0) shift++; - VERIFY_CHECK(len-shift <= (int)rlen); - memset(r, 0, rlen - len + shift); - if (len > shift) { - memcpy(r + rlen - len + shift, tmp + shift, len - shift); - } - memset(tmp, 0, sizeof(tmp)); -} - -static void rustsecp256k1zkp_v0_4_0_num_set_bin(rustsecp256k1zkp_v0_4_0_num *r, const unsigned char *a, unsigned int alen) { - int len; - VERIFY_CHECK(alen > 0); - VERIFY_CHECK(alen <= 64); - len = mpn_set_str(r->data, a, alen, 256); - if (len == 0) { - r->data[0] = 0; - len = 1; - } - VERIFY_CHECK(len <= NUM_LIMBS*2); - r->limbs = len; - r->neg = 0; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } -} - -static void rustsecp256k1zkp_v0_4_0_num_add_abs(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b) { - mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs); - r->limbs = a->limbs; - if (c != 0) { - VERIFY_CHECK(r->limbs < 2*NUM_LIMBS); - r->data[r->limbs++] = c; - } -} - -static void rustsecp256k1zkp_v0_4_0_num_sub_abs(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b) { 
- mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs); - (void)c; - VERIFY_CHECK(c == 0); - r->limbs = a->limbs; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } -} - -static void rustsecp256k1zkp_v0_4_0_num_mod(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *m) { - rustsecp256k1zkp_v0_4_0_num_sanity(r); - rustsecp256k1zkp_v0_4_0_num_sanity(m); - - if (r->limbs >= m->limbs) { - mp_limb_t t[2*NUM_LIMBS]; - mpn_tdiv_qr(t, r->data, 0, r->data, r->limbs, m->data, m->limbs); - memset(t, 0, sizeof(t)); - r->limbs = m->limbs; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } - } - - if (r->neg && (r->limbs > 1 || r->data[0] != 0)) { - rustsecp256k1zkp_v0_4_0_num_sub_abs(r, m, r); - r->neg = 0; - } -} - -static void rustsecp256k1zkp_v0_4_0_num_mod_inverse(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *m) { - int i; - mp_limb_t g[NUM_LIMBS+1]; - mp_limb_t u[NUM_LIMBS+1]; - mp_limb_t v[NUM_LIMBS+1]; - mp_size_t sn; - mp_size_t gn; - rustsecp256k1zkp_v0_4_0_num_sanity(a); - rustsecp256k1zkp_v0_4_0_num_sanity(m); - - /** mpn_gcdext computes: (G,S) = gcdext(U,V), where - * * G = gcd(U,V) - * * G = U*S + V*T - * * U has equal or more limbs than V, and V has no padding - * If we set U to be (a padded version of) a, and V = m: - * G = a*S + m*T - * G = a*S mod m - * Assuming G=1: - * S = 1/a mod m - */ - VERIFY_CHECK(m->limbs <= NUM_LIMBS); - VERIFY_CHECK(m->data[m->limbs-1] != 0); - for (i = 0; i < m->limbs; i++) { - u[i] = (i < a->limbs) ? 
a->data[i] : 0; - v[i] = m->data[i]; - } - sn = NUM_LIMBS+1; - gn = mpn_gcdext(g, r->data, &sn, u, m->limbs, v, m->limbs); - (void)gn; - VERIFY_CHECK(gn == 1); - VERIFY_CHECK(g[0] == 1); - r->neg = a->neg ^ m->neg; - if (sn < 0) { - mpn_sub(r->data, m->data, m->limbs, r->data, -sn); - r->limbs = m->limbs; - while (r->limbs > 1 && r->data[r->limbs-1]==0) { - r->limbs--; - } - } else { - r->limbs = sn; - } - memset(g, 0, sizeof(g)); - memset(u, 0, sizeof(u)); - memset(v, 0, sizeof(v)); -} - -static int rustsecp256k1zkp_v0_4_0_num_jacobi(const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b) { - int ret; - mpz_t ga, gb; - rustsecp256k1zkp_v0_4_0_num_sanity(a); - rustsecp256k1zkp_v0_4_0_num_sanity(b); - VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1)); - - mpz_inits(ga, gb, NULL); - - mpz_import(gb, b->limbs, -1, sizeof(mp_limb_t), 0, 0, b->data); - mpz_import(ga, a->limbs, -1, sizeof(mp_limb_t), 0, 0, a->data); - if (a->neg) { - mpz_neg(ga, ga); - } - - ret = mpz_jacobi(ga, gb); - - mpz_clears(ga, gb, NULL); - - return ret; -} - -static int rustsecp256k1zkp_v0_4_0_num_is_one(const rustsecp256k1zkp_v0_4_0_num *a) { - return (a->limbs == 1 && a->data[0] == 1); -} - -static int rustsecp256k1zkp_v0_4_0_num_is_zero(const rustsecp256k1zkp_v0_4_0_num *a) { - return (a->limbs == 1 && a->data[0] == 0); -} - -static int rustsecp256k1zkp_v0_4_0_num_is_neg(const rustsecp256k1zkp_v0_4_0_num *a) { - return (a->limbs > 1 || a->data[0] != 0) && a->neg; -} - -static int rustsecp256k1zkp_v0_4_0_num_cmp(const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b) { - if (a->limbs > b->limbs) { - return 1; - } - if (a->limbs < b->limbs) { - return -1; - } - return mpn_cmp(a->data, b->data, a->limbs); -} - -static int rustsecp256k1zkp_v0_4_0_num_eq(const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b) { - if (a->limbs > b->limbs) { - return 0; - } - if (a->limbs < b->limbs) { - return 0; - } - if ((a->neg && 
!rustsecp256k1zkp_v0_4_0_num_is_zero(a)) != (b->neg && !rustsecp256k1zkp_v0_4_0_num_is_zero(b))) { - return 0; - } - return mpn_cmp(a->data, b->data, a->limbs) == 0; -} - -static void rustsecp256k1zkp_v0_4_0_num_subadd(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b, int bneg) { - if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */ - r->neg = a->neg; - if (a->limbs >= b->limbs) { - rustsecp256k1zkp_v0_4_0_num_add_abs(r, a, b); - } else { - rustsecp256k1zkp_v0_4_0_num_add_abs(r, b, a); - } - } else { - if (rustsecp256k1zkp_v0_4_0_num_cmp(a, b) > 0) { - r->neg = a->neg; - rustsecp256k1zkp_v0_4_0_num_sub_abs(r, a, b); - } else { - r->neg = b->neg ^ bneg; - rustsecp256k1zkp_v0_4_0_num_sub_abs(r, b, a); - } - } -} - -static void rustsecp256k1zkp_v0_4_0_num_add(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b) { - rustsecp256k1zkp_v0_4_0_num_sanity(a); - rustsecp256k1zkp_v0_4_0_num_sanity(b); - rustsecp256k1zkp_v0_4_0_num_subadd(r, a, b, 0); -} - -static void rustsecp256k1zkp_v0_4_0_num_sub(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b) { - rustsecp256k1zkp_v0_4_0_num_sanity(a); - rustsecp256k1zkp_v0_4_0_num_sanity(b); - rustsecp256k1zkp_v0_4_0_num_subadd(r, a, b, 1); -} - -static void rustsecp256k1zkp_v0_4_0_num_mul(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_num *a, const rustsecp256k1zkp_v0_4_0_num *b) { - mp_limb_t tmp[2*NUM_LIMBS+1]; - rustsecp256k1zkp_v0_4_0_num_sanity(a); - rustsecp256k1zkp_v0_4_0_num_sanity(b); - - VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1); - if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) { - r->limbs = 1; - r->neg = 0; - r->data[0] = 0; - return; - } - if (a->limbs >= b->limbs) { - mpn_mul(tmp, a->data, a->limbs, b->data, b->limbs); - } else { - mpn_mul(tmp, b->data, b->limbs, a->data, a->limbs); - } - 
r->limbs = a->limbs + b->limbs; - if (r->limbs > 1 && tmp[r->limbs - 1]==0) { - r->limbs--; - } - VERIFY_CHECK(r->limbs <= 2*NUM_LIMBS); - mpn_copyi(r->data, tmp, r->limbs); - r->neg = a->neg ^ b->neg; - memset(tmp, 0, sizeof(tmp)); -} - -static void rustsecp256k1zkp_v0_4_0_num_shift(rustsecp256k1zkp_v0_4_0_num *r, int bits) { - if (bits % GMP_NUMB_BITS) { - /* Shift within limbs. */ - mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS); - } - if (bits >= GMP_NUMB_BITS) { - int i; - /* Shift full limbs. */ - for (i = 0; i < r->limbs; i++) { - int index = i + (bits / GMP_NUMB_BITS); - if (index < r->limbs && index < 2*NUM_LIMBS) { - r->data[i] = r->data[index]; - } else { - r->data[i] = 0; - } - } - } - while (r->limbs>1 && r->data[r->limbs-1]==0) { - r->limbs--; - } -} - -static void rustsecp256k1zkp_v0_4_0_num_negate(rustsecp256k1zkp_v0_4_0_num *r) { - r->neg ^= 1; -} - -#endif /* SECP256K1_NUM_REPR_IMPL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/num_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/num_impl.h deleted file mode 100644 index 880598ef..00000000 --- a/secp256k1-zkp-sys/depend/secp256k1/src/num_impl.h +++ /dev/null @@ -1,24 +0,0 @@ -/*********************************************************************** - * Copyright (c) 2013, 2014 Pieter Wuille * - * Distributed under the MIT software license, see the accompanying * - * file COPYING or https://www.opensource.org/licenses/mit-license.php.* - ***********************************************************************/ - -#ifndef SECP256K1_NUM_IMPL_H -#define SECP256K1_NUM_IMPL_H - -#if defined HAVE_CONFIG_H -#include "libsecp256k1-config.h" -#endif - -#include "num.h" - -#if defined(USE_NUM_GMP) -#include "num_gmp_impl.h" -#elif defined(USE_NUM_NONE) -/* Nothing. 
*/ -#else -#error "Please select num implementation" -#endif - -#endif /* SECP256K1_NUM_IMPL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/scalar.h b/secp256k1-zkp-sys/depend/secp256k1/src/scalar.h index 61a5d4ba..d76d2c4b 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/scalar.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/scalar.h @@ -7,7 +7,6 @@ #ifndef SECP256K1_SCALAR_H #define SECP256K1_SCALAR_H -#include "num.h" #include "util.h" #if defined HAVE_CONFIG_H @@ -66,9 +65,6 @@ static void rustsecp256k1zkp_v0_4_0_scalar_mul(rustsecp256k1zkp_v0_4_0_scalar *r * the low bits that were shifted off */ static int rustsecp256k1zkp_v0_4_0_scalar_shr_int(rustsecp256k1zkp_v0_4_0_scalar *r, int n); -/** Compute the square of a scalar (modulo the group order). */ -static void rustsecp256k1zkp_v0_4_0_scalar_sqr(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *a); - /** Compute the inverse of a scalar (modulo the group order). */ static void rustsecp256k1zkp_v0_4_0_scalar_inverse(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *a); @@ -94,14 +90,6 @@ static int rustsecp256k1zkp_v0_4_0_scalar_is_high(const rustsecp256k1zkp_v0_4_0_ * Returns -1 if the number was negated, 1 otherwise */ static int rustsecp256k1zkp_v0_4_0_scalar_cond_negate(rustsecp256k1zkp_v0_4_0_scalar *a, int flag); -#ifndef USE_NUM_NONE -/** Convert a scalar to a number. */ -static void rustsecp256k1zkp_v0_4_0_scalar_get_num(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_scalar *a); - -/** Get the order of the group as a number. */ -static void rustsecp256k1zkp_v0_4_0_scalar_order_get_num(rustsecp256k1zkp_v0_4_0_num *r); -#endif - /** Compare two scalars. 
*/ static int rustsecp256k1zkp_v0_4_0_scalar_eq(const rustsecp256k1zkp_v0_4_0_scalar *a, const rustsecp256k1zkp_v0_4_0_scalar *b); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/scalar_4x64_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/scalar_4x64_impl.h index d6cb9fb0..a45858c9 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/scalar_4x64_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/scalar_4x64_impl.h @@ -10,6 +10,8 @@ #include "scalar.h" #include +#include "modinv64_impl.h" + /* Limbs of the secp256k1 order. */ #define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL) #define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL) @@ -222,28 +224,6 @@ static int rustsecp256k1zkp_v0_4_0_scalar_cond_negate(rustsecp256k1zkp_v0_4_0_sc VERIFY_CHECK(c1 >= th); \ } -/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */ -#define muladd2(a,b) { \ - uint64_t tl, th, th2, tl2; \ - { \ - uint128_t t = (uint128_t)a * b; \ - th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \ - tl = t; \ - } \ - th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \ - c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ - tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \ - th2 += (tl2 < tl); /* at most 0xFFFFFFFFFFFFFFFF */ \ - c0 += tl2; /* overflow is handled on the next line */ \ - th2 += (c0 < tl2); /* second overflow is handled on the next line */ \ - c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ - c1 += th2; /* overflow is handled on the next line */ \ - c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ -} - /** Add a to the number defined by (c0,c1,c2). c2 must never overflow. 
*/ #define sumadd(a) { \ unsigned int over; \ @@ -753,148 +733,10 @@ static void rustsecp256k1zkp_v0_4_0_scalar_mul_512(uint64_t l[8], const rustsecp #endif } -static void rustsecp256k1zkp_v0_4_0_scalar_sqr_512(uint64_t l[8], const rustsecp256k1zkp_v0_4_0_scalar *a) { -#ifdef USE_ASM_X86_64 - __asm__ __volatile__( - /* Preload */ - "movq 0(%%rdi), %%r11\n" - "movq 8(%%rdi), %%r12\n" - "movq 16(%%rdi), %%r13\n" - "movq 24(%%rdi), %%r14\n" - /* (rax,rdx) = a0 * a0 */ - "movq %%r11, %%rax\n" - "mulq %%r11\n" - /* Extract l0 */ - "movq %%rax, 0(%%rsi)\n" - /* (r8,r9,r10) = (rdx,0) */ - "movq %%rdx, %%r8\n" - "xorq %%r9, %%r9\n" - "xorq %%r10, %%r10\n" - /* (r8,r9,r10) += 2 * a0 * a1 */ - "movq %%r11, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* Extract l1 */ - "movq %%r8, 8(%%rsi)\n" - "xorq %%r8, %%r8\n" - /* (r9,r10,r8) += 2 * a0 * a2 */ - "movq %%r11, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* (r9,r10,r8) += a1 * a1 */ - "movq %%r12, %%rax\n" - "mulq %%r12\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* Extract l2 */ - "movq %%r9, 16(%%rsi)\n" - "xorq %%r9, %%r9\n" - /* (r10,r8,r9) += 2 * a0 * a3 */ - "movq %%r11, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* (r10,r8,r9) += 2 * a1 * a2 */ - "movq %%r12, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - "adcq $0, %%r9\n" - /* Extract l3 */ - "movq %%r10, 24(%%rsi)\n" - "xorq %%r10, %%r10\n" - /* (r8,r9,r10) += 2 * a1 * a3 */ - "movq %%r12, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - "addq %%rax, 
%%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* (r8,r9,r10) += a2 * a2 */ - "movq %%r13, %%rax\n" - "mulq %%r13\n" - "addq %%rax, %%r8\n" - "adcq %%rdx, %%r9\n" - "adcq $0, %%r10\n" - /* Extract l4 */ - "movq %%r8, 32(%%rsi)\n" - "xorq %%r8, %%r8\n" - /* (r9,r10,r8) += 2 * a2 * a3 */ - "movq %%r13, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - "addq %%rax, %%r9\n" - "adcq %%rdx, %%r10\n" - "adcq $0, %%r8\n" - /* Extract l5 */ - "movq %%r9, 40(%%rsi)\n" - /* (r10,r8) += a3 * a3 */ - "movq %%r14, %%rax\n" - "mulq %%r14\n" - "addq %%rax, %%r10\n" - "adcq %%rdx, %%r8\n" - /* Extract l6 */ - "movq %%r10, 48(%%rsi)\n" - /* Extract l7 */ - "movq %%r8, 56(%%rsi)\n" - : - : "S"(l), "D"(a->d) - : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory"); -#else - /* 160 bit accumulator. */ - uint64_t c0 = 0, c1 = 0; - uint32_t c2 = 0; - - /* l[0..7] = a[0..3] * b[0..3]. */ - muladd_fast(a->d[0], a->d[0]); - extract_fast(l[0]); - muladd2(a->d[0], a->d[1]); - extract(l[1]); - muladd2(a->d[0], a->d[2]); - muladd(a->d[1], a->d[1]); - extract(l[2]); - muladd2(a->d[0], a->d[3]); - muladd2(a->d[1], a->d[2]); - extract(l[3]); - muladd2(a->d[1], a->d[3]); - muladd(a->d[2], a->d[2]); - extract(l[4]); - muladd2(a->d[2], a->d[3]); - extract(l[5]); - muladd_fast(a->d[3], a->d[3]); - extract_fast(l[6]); - VERIFY_CHECK(c1 == 0); - l[7] = c0; -#endif -} - #undef sumadd #undef sumadd_fast #undef muladd #undef muladd_fast -#undef muladd2 #undef extract #undef extract_fast @@ -916,12 +758,6 @@ static int rustsecp256k1zkp_v0_4_0_scalar_shr_int(rustsecp256k1zkp_v0_4_0_scalar return ret; } -static void rustsecp256k1zkp_v0_4_0_scalar_sqr(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *a) { - uint64_t l[8]; - rustsecp256k1zkp_v0_4_0_scalar_sqr_512(l, a); - rustsecp256k1zkp_v0_4_0_scalar_reduce_512(r, l); -} - static void rustsecp256k1zkp_v0_4_0_scalar_split_128(rustsecp256k1zkp_v0_4_0_scalar *r1, 
rustsecp256k1zkp_v0_4_0_scalar *r2, const rustsecp256k1zkp_v0_4_0_scalar *k) { r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; @@ -1052,4 +888,78 @@ static void rustsecp256k1zkp_v0_4_0_scalar_chacha20(rustsecp256k1zkp_v0_4_0_scal #undef QUARTERROUND #undef LE32 +static void rustsecp256k1zkp_v0_4_0_scalar_from_signed62(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_modinv64_signed62 *a) { + const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4]; + + /* The output from rustsecp256k1zkp_v0_4_0_modinv64{_var} should be normalized to range [0,modulus), and + * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4). + */ + VERIFY_CHECK(a0 >> 62 == 0); + VERIFY_CHECK(a1 >> 62 == 0); + VERIFY_CHECK(a2 >> 62 == 0); + VERIFY_CHECK(a3 >> 62 == 0); + VERIFY_CHECK(a4 >> 8 == 0); + + r->d[0] = a0 | a1 << 62; + r->d[1] = a1 >> 2 | a2 << 60; + r->d[2] = a2 >> 4 | a3 << 58; + r->d[3] = a3 >> 6 | a4 << 56; + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_scalar_check_overflow(r) == 0); +#endif +} + +static void rustsecp256k1zkp_v0_4_0_scalar_to_signed62(rustsecp256k1zkp_v0_4_0_modinv64_signed62 *r, const rustsecp256k1zkp_v0_4_0_scalar *a) { + const uint64_t M62 = UINT64_MAX >> 2; + const uint64_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3]; + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_scalar_check_overflow(a) == 0); +#endif + + r->v[0] = a0 & M62; + r->v[1] = (a0 >> 62 | a1 << 2) & M62; + r->v[2] = (a1 >> 60 | a2 << 4) & M62; + r->v[3] = (a2 >> 58 | a3 << 6) & M62; + r->v[4] = a3 >> 56; +} + +static const rustsecp256k1zkp_v0_4_0_modinv64_modinfo rustsecp256k1zkp_v0_4_0_const_modinfo_scalar = { + {{0x3FD25E8CD0364141LL, 0x2ABB739ABD2280EELL, -0x15LL, 0, 256}}, + 0x34F20099AA774EC1LL +}; + +static void rustsecp256k1zkp_v0_4_0_scalar_inverse(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *x) { + rustsecp256k1zkp_v0_4_0_modinv64_signed62 s; +#ifdef VERIFY + 
int zero_in = rustsecp256k1zkp_v0_4_0_scalar_is_zero(x); +#endif + rustsecp256k1zkp_v0_4_0_scalar_to_signed62(&s, x); + rustsecp256k1zkp_v0_4_0_modinv64(&s, &rustsecp256k1zkp_v0_4_0_const_modinfo_scalar); + rustsecp256k1zkp_v0_4_0_scalar_from_signed62(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(r) == zero_in); +#endif +} + +static void rustsecp256k1zkp_v0_4_0_scalar_inverse_var(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *x) { + rustsecp256k1zkp_v0_4_0_modinv64_signed62 s; +#ifdef VERIFY + int zero_in = rustsecp256k1zkp_v0_4_0_scalar_is_zero(x); +#endif + rustsecp256k1zkp_v0_4_0_scalar_to_signed62(&s, x); + rustsecp256k1zkp_v0_4_0_modinv64_var(&s, &rustsecp256k1zkp_v0_4_0_const_modinfo_scalar); + rustsecp256k1zkp_v0_4_0_scalar_from_signed62(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(r) == zero_in); +#endif +} + +SECP256K1_INLINE static int rustsecp256k1zkp_v0_4_0_scalar_is_even(const rustsecp256k1zkp_v0_4_0_scalar *a) { + return !(a->d[0] & 1); +} + #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/scalar_8x32_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/scalar_8x32_impl.h index d93b175f..0c6c39c9 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/scalar_8x32_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/scalar_8x32_impl.h @@ -9,6 +9,8 @@ #include +#include "modinv32_impl.h" + /* Limbs of the secp256k1 order. */ #define SECP256K1_N_0 ((uint32_t)0xD0364141UL) #define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL) @@ -304,28 +306,6 @@ static int rustsecp256k1zkp_v0_4_0_scalar_cond_negate(rustsecp256k1zkp_v0_4_0_sc VERIFY_CHECK(c1 >= th); \ } -/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. 
*/ -#define muladd2(a,b) { \ - uint32_t tl, th, th2, tl2; \ - { \ - uint64_t t = (uint64_t)a * b; \ - th = t >> 32; /* at most 0xFFFFFFFE */ \ - tl = t; \ - } \ - th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \ - c2 += (th2 < th); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((th2 >= th) || (c2 != 0)); \ - tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFF) */ \ - th2 += (tl2 < tl); /* at most 0xFFFFFFFF */ \ - c0 += tl2; /* overflow is handled on the next line */ \ - th2 += (c0 < tl2); /* second overflow is handled on the next line */ \ - c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \ - c1 += th2; /* overflow is handled on the next line */ \ - c2 += (c1 < th2); /* never overflows by contract (verified the next line) */ \ - VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \ -} - /** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */ #define sumadd(a) { \ unsigned int over; \ @@ -589,71 +569,10 @@ static void rustsecp256k1zkp_v0_4_0_scalar_mul_512(uint32_t *l, const rustsecp25 l[15] = c0; } -static void rustsecp256k1zkp_v0_4_0_scalar_sqr_512(uint32_t *l, const rustsecp256k1zkp_v0_4_0_scalar *a) { - /* 96 bit accumulator. */ - uint32_t c0 = 0, c1 = 0, c2 = 0; - - /* l[0..15] = a[0..7]^2. 
*/ - muladd_fast(a->d[0], a->d[0]); - extract_fast(l[0]); - muladd2(a->d[0], a->d[1]); - extract(l[1]); - muladd2(a->d[0], a->d[2]); - muladd(a->d[1], a->d[1]); - extract(l[2]); - muladd2(a->d[0], a->d[3]); - muladd2(a->d[1], a->d[2]); - extract(l[3]); - muladd2(a->d[0], a->d[4]); - muladd2(a->d[1], a->d[3]); - muladd(a->d[2], a->d[2]); - extract(l[4]); - muladd2(a->d[0], a->d[5]); - muladd2(a->d[1], a->d[4]); - muladd2(a->d[2], a->d[3]); - extract(l[5]); - muladd2(a->d[0], a->d[6]); - muladd2(a->d[1], a->d[5]); - muladd2(a->d[2], a->d[4]); - muladd(a->d[3], a->d[3]); - extract(l[6]); - muladd2(a->d[0], a->d[7]); - muladd2(a->d[1], a->d[6]); - muladd2(a->d[2], a->d[5]); - muladd2(a->d[3], a->d[4]); - extract(l[7]); - muladd2(a->d[1], a->d[7]); - muladd2(a->d[2], a->d[6]); - muladd2(a->d[3], a->d[5]); - muladd(a->d[4], a->d[4]); - extract(l[8]); - muladd2(a->d[2], a->d[7]); - muladd2(a->d[3], a->d[6]); - muladd2(a->d[4], a->d[5]); - extract(l[9]); - muladd2(a->d[3], a->d[7]); - muladd2(a->d[4], a->d[6]); - muladd(a->d[5], a->d[5]); - extract(l[10]); - muladd2(a->d[4], a->d[7]); - muladd2(a->d[5], a->d[6]); - extract(l[11]); - muladd2(a->d[5], a->d[7]); - muladd(a->d[6], a->d[6]); - extract(l[12]); - muladd2(a->d[6], a->d[7]); - extract(l[13]); - muladd_fast(a->d[7], a->d[7]); - extract_fast(l[14]); - VERIFY_CHECK(c1 == 0); - l[15] = c0; -} - #undef sumadd #undef sumadd_fast #undef muladd #undef muladd_fast -#undef muladd2 #undef extract #undef extract_fast @@ -679,12 +598,6 @@ static int rustsecp256k1zkp_v0_4_0_scalar_shr_int(rustsecp256k1zkp_v0_4_0_scalar return ret; } -static void rustsecp256k1zkp_v0_4_0_scalar_sqr(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *a) { - uint32_t l[16]; - rustsecp256k1zkp_v0_4_0_scalar_sqr_512(l, a); - rustsecp256k1zkp_v0_4_0_scalar_reduce_512(r, l); -} - static void rustsecp256k1zkp_v0_4_0_scalar_split_128(rustsecp256k1zkp_v0_4_0_scalar *r1, rustsecp256k1zkp_v0_4_0_scalar *r2, const 
rustsecp256k1zkp_v0_4_0_scalar *k) { r1->d[0] = k->d[0]; r1->d[1] = k->d[1]; @@ -839,4 +752,92 @@ static void rustsecp256k1zkp_v0_4_0_scalar_chacha20(rustsecp256k1zkp_v0_4_0_scal #undef QUARTERROUND #undef LE32 +static void rustsecp256k1zkp_v0_4_0_scalar_from_signed30(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_modinv32_signed30 *a) { + const uint32_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4], + a5 = a->v[5], a6 = a->v[6], a7 = a->v[7], a8 = a->v[8]; + + /* The output from rustsecp256k1zkp_v0_4_0_modinv32{_var} should be normalized to range [0,modulus), and + * have limbs in [0,2^30). The modulus is < 2^256, so the top limb must be below 2^(256-30*8). + */ + VERIFY_CHECK(a0 >> 30 == 0); + VERIFY_CHECK(a1 >> 30 == 0); + VERIFY_CHECK(a2 >> 30 == 0); + VERIFY_CHECK(a3 >> 30 == 0); + VERIFY_CHECK(a4 >> 30 == 0); + VERIFY_CHECK(a5 >> 30 == 0); + VERIFY_CHECK(a6 >> 30 == 0); + VERIFY_CHECK(a7 >> 30 == 0); + VERIFY_CHECK(a8 >> 16 == 0); + + r->d[0] = a0 | a1 << 30; + r->d[1] = a1 >> 2 | a2 << 28; + r->d[2] = a2 >> 4 | a3 << 26; + r->d[3] = a3 >> 6 | a4 << 24; + r->d[4] = a4 >> 8 | a5 << 22; + r->d[5] = a5 >> 10 | a6 << 20; + r->d[6] = a6 >> 12 | a7 << 18; + r->d[7] = a7 >> 14 | a8 << 16; + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_scalar_check_overflow(r) == 0); +#endif +} + +static void rustsecp256k1zkp_v0_4_0_scalar_to_signed30(rustsecp256k1zkp_v0_4_0_modinv32_signed30 *r, const rustsecp256k1zkp_v0_4_0_scalar *a) { + const uint32_t M30 = UINT32_MAX >> 2; + const uint32_t a0 = a->d[0], a1 = a->d[1], a2 = a->d[2], a3 = a->d[3], + a4 = a->d[4], a5 = a->d[5], a6 = a->d[6], a7 = a->d[7]; + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_scalar_check_overflow(a) == 0); +#endif + + r->v[0] = a0 & M30; + r->v[1] = (a0 >> 30 | a1 << 2) & M30; + r->v[2] = (a1 >> 28 | a2 << 4) & M30; + r->v[3] = (a2 >> 26 | a3 << 6) & M30; + r->v[4] = (a3 >> 24 | a4 << 8) & M30; + r->v[5] = (a4 >> 22 | a5 << 10) & M30; + r->v[6] = 
(a5 >> 20 | a6 << 12) & M30; + r->v[7] = (a6 >> 18 | a7 << 14) & M30; + r->v[8] = a7 >> 16; +} + +static const rustsecp256k1zkp_v0_4_0_modinv32_modinfo rustsecp256k1zkp_v0_4_0_const_modinfo_scalar = { + {{0x10364141L, 0x3F497A33L, 0x348A03BBL, 0x2BB739ABL, -0x146L, 0, 0, 0, 65536}}, + 0x2A774EC1L +}; + +static void rustsecp256k1zkp_v0_4_0_scalar_inverse(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *x) { + rustsecp256k1zkp_v0_4_0_modinv32_signed30 s; +#ifdef VERIFY + int zero_in = rustsecp256k1zkp_v0_4_0_scalar_is_zero(x); +#endif + rustsecp256k1zkp_v0_4_0_scalar_to_signed30(&s, x); + rustsecp256k1zkp_v0_4_0_modinv32(&s, &rustsecp256k1zkp_v0_4_0_const_modinfo_scalar); + rustsecp256k1zkp_v0_4_0_scalar_from_signed30(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(r) == zero_in); +#endif +} + +static void rustsecp256k1zkp_v0_4_0_scalar_inverse_var(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *x) { + rustsecp256k1zkp_v0_4_0_modinv32_signed30 s; +#ifdef VERIFY + int zero_in = rustsecp256k1zkp_v0_4_0_scalar_is_zero(x); +#endif + rustsecp256k1zkp_v0_4_0_scalar_to_signed30(&s, x); + rustsecp256k1zkp_v0_4_0_modinv32_var(&s, &rustsecp256k1zkp_v0_4_0_const_modinfo_scalar); + rustsecp256k1zkp_v0_4_0_scalar_from_signed30(r, &s); + +#ifdef VERIFY + VERIFY_CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(r) == zero_in); +#endif +} + +SECP256K1_INLINE static int rustsecp256k1zkp_v0_4_0_scalar_is_even(const rustsecp256k1zkp_v0_4_0_scalar *a) { + return !(a->d[0] & 1); +} + #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/scalar_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/scalar_impl.h index 550b3139..2fbaff2b 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/scalar_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/scalar_impl.h @@ -31,231 +31,12 @@ static const rustsecp256k1zkp_v0_4_0_scalar rustsecp256k1zkp_v0_4_0_scalar_one = 
SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1); static const rustsecp256k1zkp_v0_4_0_scalar rustsecp256k1zkp_v0_4_0_scalar_zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0); -#ifndef USE_NUM_NONE -static void rustsecp256k1zkp_v0_4_0_scalar_get_num(rustsecp256k1zkp_v0_4_0_num *r, const rustsecp256k1zkp_v0_4_0_scalar *a) { - unsigned char c[32]; - rustsecp256k1zkp_v0_4_0_scalar_get_b32(c, a); - rustsecp256k1zkp_v0_4_0_num_set_bin(r, c, 32); -} - -/** secp256k1 curve order, see rustsecp256k1zkp_v0_4_0_ecdsa_const_order_as_fe in ecdsa_impl.h */ -static void rustsecp256k1zkp_v0_4_0_scalar_order_get_num(rustsecp256k1zkp_v0_4_0_num *r) { -#if defined(EXHAUSTIVE_TEST_ORDER) - static const unsigned char order[32] = { - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER - }; -#else - static const unsigned char order[32] = { - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF, - 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE, - 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B, - 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41 - }; -#endif - rustsecp256k1zkp_v0_4_0_num_set_bin(r, order, 32); -} -#endif - static int rustsecp256k1zkp_v0_4_0_scalar_set_b32_seckey(rustsecp256k1zkp_v0_4_0_scalar *r, const unsigned char *bin) { int overflow; rustsecp256k1zkp_v0_4_0_scalar_set_b32(r, bin, &overflow); return (!overflow) & (!rustsecp256k1zkp_v0_4_0_scalar_is_zero(r)); } -static void rustsecp256k1zkp_v0_4_0_scalar_inverse(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *x) { -#if defined(EXHAUSTIVE_TEST_ORDER) - int i; - *r = 0; - for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) - if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) - *r = i; - /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus - * have a composite group order; fix it in exhaustive_tests.c). 
*/ - VERIFY_CHECK(*r != 0); -} -#else - rustsecp256k1zkp_v0_4_0_scalar *t; - int i; - /* First compute xN as x ^ (2^N - 1) for some values of N, - * and uM as x ^ M for some values of M. */ - rustsecp256k1zkp_v0_4_0_scalar x2, x3, x6, x8, x14, x28, x56, x112, x126; - rustsecp256k1zkp_v0_4_0_scalar u2, u5, u9, u11, u13; - - rustsecp256k1zkp_v0_4_0_scalar_sqr(&u2, x); - rustsecp256k1zkp_v0_4_0_scalar_mul(&x2, &u2, x); - rustsecp256k1zkp_v0_4_0_scalar_mul(&u5, &u2, &x2); - rustsecp256k1zkp_v0_4_0_scalar_mul(&x3, &u5, &u2); - rustsecp256k1zkp_v0_4_0_scalar_mul(&u9, &x3, &u2); - rustsecp256k1zkp_v0_4_0_scalar_mul(&u11, &u9, &u2); - rustsecp256k1zkp_v0_4_0_scalar_mul(&u13, &u11, &u2); - - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x6, &u13); - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x6, &x6); - rustsecp256k1zkp_v0_4_0_scalar_mul(&x6, &x6, &u11); - - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x8, &x6); - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x8, &x8); - rustsecp256k1zkp_v0_4_0_scalar_mul(&x8, &x8, &x2); - - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x14, &x8); - for (i = 0; i < 5; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x14, &x14); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(&x14, &x14, &x6); - - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x28, &x14); - for (i = 0; i < 13; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x28, &x28); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(&x28, &x28, &x14); - - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x56, &x28); - for (i = 0; i < 27; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x56, &x56); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(&x56, &x56, &x28); - - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x112, &x56); - for (i = 0; i < 55; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x112, &x112); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(&x112, &x112, &x56); - - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x126, &x112); - for (i = 0; i < 13; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(&x126, &x126); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(&x126, &x126, &x14); - - /* Then accumulate the final result (t 
starts at x126). */ - t = &x126; - for (i = 0; i < 3; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u5); /* 101 */ - for (i = 0; i < 4; i++) { /* 0 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 4; i++) { /* 0 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u5); /* 101 */ - for (i = 0; i < 5; i++) { /* 0 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u11); /* 1011 */ - for (i = 0; i < 4; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u11); /* 1011 */ - for (i = 0; i < 4; i++) { /* 0 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 5; i++) { /* 00 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 6; i++) { /* 00 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */ - for (i = 0; i < 4; i++) { /* 0 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u5); /* 101 */ - for (i = 0; i < 3; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 5; i++) { /* 0 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u9); /* 1001 */ - for (i = 0; i < 6; i++) { /* 000 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u5); /* 101 */ - for (i = 0; i < 10; i++) { /* 0000000 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 4; i++) { /* 0 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - 
rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &x3); /* 111 */ - for (i = 0; i < 9; i++) { /* 0 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &x8); /* 11111111 */ - for (i = 0; i < 5; i++) { /* 0 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u9); /* 1001 */ - for (i = 0; i < 6; i++) { /* 00 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u11); /* 1011 */ - for (i = 0; i < 4; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */ - for (i = 0; i < 5; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &x2); /* 11 */ - for (i = 0; i < 6; i++) { /* 00 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */ - for (i = 0; i < 10; i++) { /* 000000 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u13); /* 1101 */ - for (i = 0; i < 4; i++) { - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, &u9); /* 1001 */ - for (i = 0; i < 6; i++) { /* 00000 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(t, t, x); /* 1 */ - for (i = 0; i < 8; i++) { /* 00 */ - rustsecp256k1zkp_v0_4_0_scalar_sqr(t, t); - } - rustsecp256k1zkp_v0_4_0_scalar_mul(r, t, &x6); /* 111111 */ -} - -SECP256K1_INLINE static int rustsecp256k1zkp_v0_4_0_scalar_is_even(const rustsecp256k1zkp_v0_4_0_scalar *a) { - return !(a->d[0] & 1); -} -#endif - -static void rustsecp256k1zkp_v0_4_0_scalar_inverse_var(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *x) { -#if defined(USE_SCALAR_INV_BUILTIN) - rustsecp256k1zkp_v0_4_0_scalar_inverse(r, x); -#elif defined(USE_SCALAR_INV_NUM) - unsigned char b[32]; - rustsecp256k1zkp_v0_4_0_num n, m; - rustsecp256k1zkp_v0_4_0_scalar t = *x; - 
rustsecp256k1zkp_v0_4_0_scalar_get_b32(b, &t); - rustsecp256k1zkp_v0_4_0_num_set_bin(&n, b, 32); - rustsecp256k1zkp_v0_4_0_scalar_order_get_num(&m); - rustsecp256k1zkp_v0_4_0_num_mod_inverse(&n, &n, &m); - rustsecp256k1zkp_v0_4_0_num_get_bin(b, 32, &n); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(r, b, NULL); - /* Verify that the inverse was computed correctly, without GMP code. */ - rustsecp256k1zkp_v0_4_0_scalar_mul(&t, &t, r); - CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_one(&t)); -#else -#error "Please select scalar inverse implementation" -#endif -} - /* These parameters are generated using sage/gen_exhaustive_groups.sage. */ #if defined(EXHAUSTIVE_TEST_ORDER) # if EXHAUSTIVE_TEST_ORDER == 13 diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/scalar_low_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/scalar_low_impl.h index 06d6eaff..226bc1ff 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/scalar_low_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/scalar_low_impl.h @@ -105,10 +105,6 @@ static int rustsecp256k1zkp_v0_4_0_scalar_shr_int(rustsecp256k1zkp_v0_4_0_scalar return ret; } -static void rustsecp256k1zkp_v0_4_0_scalar_sqr(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *a) { - *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER; -} - static void rustsecp256k1zkp_v0_4_0_scalar_split_128(rustsecp256k1zkp_v0_4_0_scalar *r1, rustsecp256k1zkp_v0_4_0_scalar *r2, const rustsecp256k1zkp_v0_4_0_scalar *a) { *r1 = *a; *r2 = 0; @@ -131,4 +127,19 @@ SECP256K1_INLINE static void rustsecp256k1zkp_v0_4_0_scalar_chacha20(rustsecp256 *r2 = (seed[1] + n) % EXHAUSTIVE_TEST_ORDER; } +static void rustsecp256k1zkp_v0_4_0_scalar_inverse(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *x) { + int i; + *r = 0; + for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) + if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1) + *r = i; + /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus + * have a composite group order; fix it in 
exhaustive_tests.c). */ + VERIFY_CHECK(*r != 0); +} + +static void rustsecp256k1zkp_v0_4_0_scalar_inverse_var(rustsecp256k1zkp_v0_4_0_scalar *r, const rustsecp256k1zkp_v0_4_0_scalar *x) { + rustsecp256k1zkp_v0_4_0_scalar_inverse(r, x); +} + #endif /* SECP256K1_SCALAR_REPR_IMPL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/secp256k1.c b/secp256k1-zkp-sys/depend/secp256k1/src/secp256k1.c index dd28dbaf..ff9417a0 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/secp256k1.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/secp256k1.c @@ -4,12 +4,13 @@ * file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" -#include "include/secp256k1_preallocated.h" +#define SECP256K1_BUILD + +#include "../include/secp256k1.h" +#include "../include/secp256k1_preallocated.h" #include "assumptions.h" #include "util.h" -#include "num_impl.h" #include "field_impl.h" #include "scalar_impl.h" #include "group_impl.h" @@ -23,6 +24,10 @@ #include "scratch_impl.h" #include "selftest.h" +#ifdef SECP256K1_NO_BUILD +# error "secp256k1.h processed without SECP256K1_BUILD defined while building secp256k1.c" +#endif + #if defined(VALGRIND) #include #endif @@ -301,6 +306,32 @@ int rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize(const rustsecp256k1zkp_v0_4_0_co return ret; } +int rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(const rustsecp256k1zkp_v0_4_0_context* ctx, const rustsecp256k1zkp_v0_4_0_pubkey* pubkey0, const rustsecp256k1zkp_v0_4_0_pubkey* pubkey1) { + unsigned char out[2][33]; + const rustsecp256k1zkp_v0_4_0_pubkey* pk[2]; + int i; + + VERIFY_CHECK(ctx != NULL); + pk[0] = pubkey0; pk[1] = pubkey1; + for (i = 0; i < 2; i++) { + size_t out_size = sizeof(out[i]); + /* If the public key is NULL or invalid, ec_pubkey_serialize will call + * the illegal_callback and return 0. In that case we will serialize the + * key as all zeros which is less than any valid public key. 
This + * results in consistent comparisons even if NULL or invalid pubkeys are + * involved and prevents edge cases such as sorting algorithms that use + * this function and do not terminate as a result. */ + if (!rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { + /* Note that ec_pubkey_serialize should already set the output to + * zero in that case, but it's not guaranteed by the API, we can't + * test it and writing a VERIFY_CHECK is more complex than + * explicitly memsetting (again). */ + memset(out[i], 0, sizeof(out[i])); + } + } + return rustsecp256k1zkp_v0_4_0_memcmp_var(out[0], out[1], sizeof(out[0])); +} + static void rustsecp256k1zkp_v0_4_0_ecdsa_signature_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_scalar* r, rustsecp256k1zkp_v0_4_0_scalar* s, const rustsecp256k1zkp_v0_4_0_ecdsa_signature* sig) { (void)ctx; if (sizeof(rustsecp256k1zkp_v0_4_0_scalar) == 32) { @@ -772,6 +803,19 @@ int rustsecp256k1zkp_v0_4_0_ec_pubkey_combine(const rustsecp256k1zkp_v0_4_0_cont return 1; } +int rustsecp256k1zkp_v0_4_0_tagged_sha256(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { + rustsecp256k1zkp_v0_4_0_sha256 sha; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(hash32 != NULL); + ARG_CHECK(tag != NULL); + ARG_CHECK(msg != NULL); + + rustsecp256k1zkp_v0_4_0_sha256_initialize_tagged(&sha, tag, taglen); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, msg, msglen); + rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, hash32); + return 1; +} + #ifdef ENABLE_MODULE_ECDH #include "modules/ecdh/main_impl.h" #endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/secp256k1.c.orig b/secp256k1-zkp-sys/depend/secp256k1/src/secp256k1.c.orig index 29c253ca..842b1bf5 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/secp256k1.c.orig +++ b/secp256k1-zkp-sys/depend/secp256k1/src/secp256k1.c.orig @@ -4,12 +4,13 @@ * 
file COPYING or https://www.opensource.org/licenses/mit-license.php.* ***********************************************************************/ -#include "include/secp256k1.h" -#include "include/secp256k1_preallocated.h" +#define SECP256K1_BUILD + +#include "../include/secp256k1.h" +#include "../include/secp256k1_preallocated.h" #include "assumptions.h" #include "util.h" -#include "num_impl.h" #include "field_impl.h" #include "scalar_impl.h" #include "group_impl.h" @@ -23,6 +24,10 @@ #include "scratch_impl.h" #include "selftest.h" +#ifdef SECP256K1_NO_BUILD +# error "secp256k1.h processed without SECP256K1_BUILD defined while building secp256k1.c" +#endif + #if defined(VALGRIND) #include #endif @@ -340,6 +345,32 @@ int rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize(const rustsecp256k1zkp_v0_4_0_co return ret; } +int rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(const rustsecp256k1zkp_v0_4_0_context* ctx, const rustsecp256k1zkp_v0_4_0_pubkey* pubkey0, const rustsecp256k1zkp_v0_4_0_pubkey* pubkey1) { + unsigned char out[2][33]; + const rustsecp256k1zkp_v0_4_0_pubkey* pk[2]; + int i; + + VERIFY_CHECK(ctx != NULL); + pk[0] = pubkey0; pk[1] = pubkey1; + for (i = 0; i < 2; i++) { + size_t out_size = sizeof(out[i]); + /* If the public key is NULL or invalid, ec_pubkey_serialize will call + * the illegal_callback and return 0. In that case we will serialize the + * key as all zeros which is less than any valid public key. This + * results in consistent comparisons even if NULL or invalid pubkeys are + * involved and prevents edge cases such as sorting algorithms that use + * this function and do not terminate as a result. */ + if (!rustsecp256k1zkp_v0_4_0_ec_pubkey_serialize(ctx, out[i], &out_size, pk[i], SECP256K1_EC_COMPRESSED)) { + /* Note that ec_pubkey_serialize should already set the output to + * zero in that case, but it's not guaranteed by the API, we can't + * test it and writing a VERIFY_CHECK is more complex than + * explicitly memsetting (again). 
*/ + memset(out[i], 0, sizeof(out[i])); + } + } + return rustsecp256k1zkp_v0_4_0_memcmp_var(out[0], out[1], sizeof(out[0])); +} + static void rustsecp256k1zkp_v0_4_0_ecdsa_signature_load(const rustsecp256k1zkp_v0_4_0_context* ctx, rustsecp256k1zkp_v0_4_0_scalar* r, rustsecp256k1zkp_v0_4_0_scalar* s, const rustsecp256k1zkp_v0_4_0_ecdsa_signature* sig) { (void)ctx; if (sizeof(rustsecp256k1zkp_v0_4_0_scalar) == 32) { @@ -811,6 +842,19 @@ int rustsecp256k1zkp_v0_4_0_ec_pubkey_combine(const rustsecp256k1zkp_v0_4_0_cont return 1; } +int rustsecp256k1zkp_v0_4_0_tagged_sha256(const rustsecp256k1zkp_v0_4_0_context* ctx, unsigned char *hash32, const unsigned char *tag, size_t taglen, const unsigned char *msg, size_t msglen) { + rustsecp256k1zkp_v0_4_0_sha256 sha; + VERIFY_CHECK(ctx != NULL); + ARG_CHECK(hash32 != NULL); + ARG_CHECK(tag != NULL); + ARG_CHECK(msg != NULL); + + rustsecp256k1zkp_v0_4_0_sha256_initialize_tagged(&sha, tag, taglen); + rustsecp256k1zkp_v0_4_0_sha256_write(&sha, msg, msglen); + rustsecp256k1zkp_v0_4_0_sha256_finalize(&sha, hash32); + return 1; +} + #ifdef ENABLE_MODULE_ECDH #include "modules/ecdh/main_impl.h" #endif diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/testrand_impl.h b/secp256k1-zkp-sys/depend/secp256k1/src/testrand_impl.h index 9bac0307..ae077d88 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/testrand_impl.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/testrand_impl.h @@ -145,7 +145,7 @@ static void rustsecp256k1zkp_v0_4_0_testrand_init(const char* hexseed) { pos++; } } else { - FILE *frand = fopen("/dev/urandom", "r"); + FILE *frand = fopen("/dev/urandom", "rb"); if ((frand == NULL) || fread(&seed16, 1, sizeof(seed16), frand) != sizeof(seed16)) { uint64_t t = time(NULL) * (uint64_t)1337; fprintf(stderr, "WARNING: could not read 16 bytes from /dev/urandom; falling back to insecure PRNG\n"); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/tests.c b/secp256k1-zkp-sys/depend/secp256k1/src/tests.c index 9372e4e2..f7a8938f 100644 
--- a/secp256k1-zkp-sys/depend/secp256k1/src/tests.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/tests.c @@ -15,22 +15,28 @@ #include #include "secp256k1.c" -#include "include/secp256k1.h" -#include "include/secp256k1_preallocated.h" +#include "../include/secp256k1.h" +#include "../include/secp256k1_preallocated.h" #include "testrand_impl.h" +#include "util.h" #ifdef ENABLE_OPENSSL_TESTS -#include "openssl/bn.h" -#include "openssl/ec.h" -#include "openssl/ecdsa.h" -#include "openssl/obj_mac.h" +#include +#include +#include +#include # if OPENSSL_VERSION_NUMBER < 0x10100000L void ECDSA_SIG_get0(const ECDSA_SIG *sig, const BIGNUM **pr, const BIGNUM **ps) {*pr = sig->r; *ps = sig->s;} # endif #endif -#include "contrib/lax_der_parsing.c" -#include "contrib/lax_der_privatekey_parsing.c" +#include "../contrib/lax_der_parsing.c" +#include "../contrib/lax_der_privatekey_parsing.c" + +#include "modinv32_impl.h" +#ifdef SECP256K1_WIDEMUL_INT128 +#include "modinv64_impl.h" +#endif static int count = 64; static rustsecp256k1zkp_v0_4_0_context *ctx = NULL; @@ -462,6 +468,25 @@ void run_scratch_tests(void) { rustsecp256k1zkp_v0_4_0_context_destroy(none); } +void run_ctz_tests(void) { + static const uint32_t b32[] = {1, 0xffffffff, 0x5e56968f, 0xe0d63129}; + static const uint64_t b64[] = {1, 0xffffffffffffffff, 0xbcd02462139b3fc3, 0x98b5f80c769693ef}; + int shift; + unsigned i; + for (i = 0; i < sizeof(b32) / sizeof(b32[0]); ++i) { + for (shift = 0; shift < 32; ++shift) { + CHECK(rustsecp256k1zkp_v0_4_0_ctz32_var_debruijn(b32[i] << shift) == shift); + CHECK(rustsecp256k1zkp_v0_4_0_ctz32_var(b32[i] << shift) == shift); + } + } + for (i = 0; i < sizeof(b64) / sizeof(b64[0]); ++i) { + for (shift = 0; shift < 64; ++shift) { + CHECK(rustsecp256k1zkp_v0_4_0_ctz64_var_debruijn(b64[i] << shift) == shift); + CHECK(rustsecp256k1zkp_v0_4_0_ctz64_var(b64[i] << shift) == shift); + } + } +} + /***** HASH TESTS *****/ void run_sha256_tests(void) { @@ -585,6 +610,38 @@ void 
run_rfc6979_hmac_sha256_tests(void) { rustsecp256k1zkp_v0_4_0_rfc6979_hmac_sha256_finalize(&rng); } +void run_tagged_sha256_tests(void) { + int ecount = 0; + rustsecp256k1zkp_v0_4_0_context *none = rustsecp256k1zkp_v0_4_0_context_create(SECP256K1_CONTEXT_NONE); + unsigned char tag[32] = { 0 }; + unsigned char msg[32] = { 0 }; + unsigned char hash32[32]; + unsigned char hash_expected[32] = { + 0x04, 0x7A, 0x5E, 0x17, 0xB5, 0x86, 0x47, 0xC1, + 0x3C, 0xC6, 0xEB, 0xC0, 0xAA, 0x58, 0x3B, 0x62, + 0xFB, 0x16, 0x43, 0x32, 0x68, 0x77, 0x40, 0x6C, + 0xE2, 0x76, 0x55, 0x9A, 0x3B, 0xDE, 0x55, 0xB3 + }; + + rustsecp256k1zkp_v0_4_0_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount); + + /* API test */ + CHECK(rustsecp256k1zkp_v0_4_0_tagged_sha256(none, hash32, tag, sizeof(tag), msg, sizeof(msg)) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_tagged_sha256(none, NULL, tag, sizeof(tag), msg, sizeof(msg)) == 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_tagged_sha256(none, hash32, NULL, 0, msg, sizeof(msg)) == 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_tagged_sha256(none, hash32, tag, sizeof(tag), NULL, 0) == 0); + CHECK(ecount == 3); + + /* Static test vector */ + memcpy(tag, "tag", 3); + memcpy(msg, "msg", 3); + CHECK(rustsecp256k1zkp_v0_4_0_tagged_sha256(none, hash32, tag, 3, msg, 3) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(hash32, hash_expected, sizeof(hash32)) == 0); + rustsecp256k1zkp_v0_4_0_context_destroy(none); +} + /***** RANDOM TESTS *****/ void test_rand_bits(int rand32, int bits) { @@ -657,202 +714,924 @@ void run_rand_int(void) { } } -/***** NUM TESTS *****/ +/***** MODINV TESTS *****/ + +/* Compute the modular inverse of (odd) x mod 2^64. */ +uint64_t modinv2p64(uint64_t x) { + /* If w = 1/x mod 2^(2^L), then w*(2 - w*x) = 1/x mod 2^(2^(L+1)). See + * Hacker's Delight second edition, Henry S. Warren, Jr., pages 245-247 for + * why. Start with L=0, for which it is true for every odd x that + * 1/x=1 mod 2. 
Iterating 6 times gives us 1/x mod 2^64. */ + int l; + uint64_t w = 1; + CHECK(x & 1); + for (l = 0; l < 6; ++l) w *= (2 - w*x); + return w; +} + +/* compute out = (a*b) mod m; if b=NULL, treat b=1. + * + * Out is a 512-bit number (represented as 32 uint16_t's in LE order). The other + * arguments are 256-bit numbers (represented as 16 uint16_t's in LE order). */ +void mulmod256(uint16_t* out, const uint16_t* a, const uint16_t* b, const uint16_t* m) { + uint16_t mul[32]; + uint64_t c = 0; + int i, j; + int m_bitlen = 0; + int mul_bitlen = 0; + + if (b != NULL) { + /* Compute the product of a and b, and put it in mul. */ + for (i = 0; i < 32; ++i) { + for (j = i <= 15 ? 0 : i - 15; j <= i && j <= 15; j++) { + c += (uint64_t)a[j] * b[i - j]; + } + mul[i] = c & 0xFFFF; + c >>= 16; + } + CHECK(c == 0); -#ifndef USE_NUM_NONE -void random_num_negate(rustsecp256k1zkp_v0_4_0_num *num) { - if (rustsecp256k1zkp_v0_4_0_testrand_bits(1)) { - rustsecp256k1zkp_v0_4_0_num_negate(num); + /* compute the highest set bit in mul */ + for (i = 511; i >= 0; --i) { + if ((mul[i >> 4] >> (i & 15)) & 1) { + mul_bitlen = i; + break; + } + } + } else { + /* if b==NULL, set mul=a. */ + memcpy(mul, a, 32); + memset(mul + 16, 0, 32); + /* compute the highest set bit in mul */ + for (i = 255; i >= 0; --i) { + if ((mul[i >> 4] >> (i & 15)) & 1) { + mul_bitlen = i; + break; + } + } + } + + /* Compute the highest set bit in m. */ + for (i = 255; i >= 0; --i) { + if ((m[i >> 4] >> (i & 15)) & 1) { + m_bitlen = i; + break; + } + } + + /* Try do mul -= m<= 0; --i) { + uint16_t mul2[32]; + int64_t cs; + + /* Compute mul2 = mul - m<= 0 && bitpos < 256) { + sub |= ((m[bitpos >> 4] >> (bitpos & 15)) & 1) << p; + } + } + /* Add mul[j]-sub to accumulator, and shift bottom 16 bits out to mul2[j]. */ + cs += mul[j]; + cs -= sub; + mul2[j] = (cs & 0xFFFF); + cs >>= 16; + } + /* If remainder of subtraction is 0, set mul = mul2. 
*/ + if (cs == 0) { + memcpy(mul, mul2, sizeof(mul)); + } } + /* Sanity check: test that all limbs higher than m's highest are zero */ + for (i = (m_bitlen >> 4) + 1; i < 32; ++i) { + CHECK(mul[i] == 0); + } + memcpy(out, mul, 32); } -void random_num_order_test(rustsecp256k1zkp_v0_4_0_num *num) { - rustsecp256k1zkp_v0_4_0_scalar sc; - random_scalar_order_test(&sc); - rustsecp256k1zkp_v0_4_0_scalar_get_num(num, &sc); +/* Convert a 256-bit number represented as 16 uint16_t's to signed30 notation. */ +void uint16_to_signed30(rustsecp256k1zkp_v0_4_0_modinv32_signed30* out, const uint16_t* in) { + int i; + memset(out->v, 0, sizeof(out->v)); + for (i = 0; i < 256; ++i) { + out->v[i / 30] |= (int32_t)(((in[i >> 4]) >> (i & 15)) & 1) << (i % 30); + } } -void random_num_order(rustsecp256k1zkp_v0_4_0_num *num) { - rustsecp256k1zkp_v0_4_0_scalar sc; - random_scalar_order(&sc); - rustsecp256k1zkp_v0_4_0_scalar_get_num(num, &sc); -} - -void test_num_negate(void) { - rustsecp256k1zkp_v0_4_0_num n1; - rustsecp256k1zkp_v0_4_0_num n2; - random_num_order_test(&n1); /* n1 = R */ - random_num_negate(&n1); - rustsecp256k1zkp_v0_4_0_num_copy(&n2, &n1); /* n2 = R */ - rustsecp256k1zkp_v0_4_0_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */ - CHECK(rustsecp256k1zkp_v0_4_0_num_is_zero(&n1)); - rustsecp256k1zkp_v0_4_0_num_copy(&n1, &n2); /* n1 = R */ - rustsecp256k1zkp_v0_4_0_num_negate(&n1); /* n1 = -R */ - CHECK(!rustsecp256k1zkp_v0_4_0_num_is_zero(&n1)); - rustsecp256k1zkp_v0_4_0_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */ - CHECK(rustsecp256k1zkp_v0_4_0_num_is_zero(&n1)); - rustsecp256k1zkp_v0_4_0_num_copy(&n1, &n2); /* n1 = R */ - rustsecp256k1zkp_v0_4_0_num_negate(&n1); /* n1 = -R */ - CHECK(rustsecp256k1zkp_v0_4_0_num_is_neg(&n1) != rustsecp256k1zkp_v0_4_0_num_is_neg(&n2)); - rustsecp256k1zkp_v0_4_0_num_negate(&n1); /* n1 = R */ - CHECK(rustsecp256k1zkp_v0_4_0_num_eq(&n1, &n2)); -} - -void test_num_add_sub(void) { +/* Convert a 256-bit number in signed30 notation to a representation as 
16 uint16_t's. */ +void signed30_to_uint16(uint16_t* out, const rustsecp256k1zkp_v0_4_0_modinv32_signed30* in) { int i; - rustsecp256k1zkp_v0_4_0_scalar s; - rustsecp256k1zkp_v0_4_0_num n1; - rustsecp256k1zkp_v0_4_0_num n2; - rustsecp256k1zkp_v0_4_0_num n1p2, n2p1, n1m2, n2m1; - random_num_order_test(&n1); /* n1 = R1 */ - if (rustsecp256k1zkp_v0_4_0_testrand_bits(1)) { - random_num_negate(&n1); + memset(out, 0, 32); + for (i = 0; i < 256; ++i) { + out[i >> 4] |= (((in->v[i / 30]) >> (i % 30)) & 1) << (i & 15); } - random_num_order_test(&n2); /* n2 = R2 */ - if (rustsecp256k1zkp_v0_4_0_testrand_bits(1)) { - random_num_negate(&n2); - } - rustsecp256k1zkp_v0_4_0_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */ - rustsecp256k1zkp_v0_4_0_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */ - rustsecp256k1zkp_v0_4_0_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */ - rustsecp256k1zkp_v0_4_0_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */ - CHECK(rustsecp256k1zkp_v0_4_0_num_eq(&n1p2, &n2p1)); - CHECK(!rustsecp256k1zkp_v0_4_0_num_eq(&n1p2, &n1m2)); - rustsecp256k1zkp_v0_4_0_num_negate(&n2m1); /* n2m1 = -R2 + R1 */ - CHECK(rustsecp256k1zkp_v0_4_0_num_eq(&n2m1, &n1m2)); - CHECK(!rustsecp256k1zkp_v0_4_0_num_eq(&n2m1, &n1)); - rustsecp256k1zkp_v0_4_0_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */ - CHECK(rustsecp256k1zkp_v0_4_0_num_eq(&n2m1, &n1)); - CHECK(!rustsecp256k1zkp_v0_4_0_num_eq(&n2p1, &n1)); - rustsecp256k1zkp_v0_4_0_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */ - CHECK(rustsecp256k1zkp_v0_4_0_num_eq(&n2p1, &n1)); - - /* check is_one */ - rustsecp256k1zkp_v0_4_0_scalar_set_int(&s, 1); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n1, &s); - CHECK(rustsecp256k1zkp_v0_4_0_num_is_one(&n1)); - /* check that 2^n + 1 is never 1 */ - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n2, &s); - for (i = 0; i < 250; ++i) { - rustsecp256k1zkp_v0_4_0_num_add(&n1, &n1, &n1); /* n1 *= 2 */ - rustsecp256k1zkp_v0_4_0_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */ - 
CHECK(!rustsecp256k1zkp_v0_4_0_num_is_one(&n1p2)); - } -} - -void test_num_mod(void) { +} + +/* Randomly mutate the sign of limbs in signed30 representation, without changing the value. */ +void mutate_sign_signed30(rustsecp256k1zkp_v0_4_0_modinv32_signed30* x) { int i; - rustsecp256k1zkp_v0_4_0_scalar s; - rustsecp256k1zkp_v0_4_0_num order, n; + for (i = 0; i < 16; ++i) { + int pos = rustsecp256k1zkp_v0_4_0_testrand_int(8); + if (x->v[pos] > 0 && x->v[pos + 1] <= 0x3fffffff) { + x->v[pos] -= 0x40000000; + x->v[pos + 1] += 1; + } else if (x->v[pos] < 0 && x->v[pos + 1] >= 0x3fffffff) { + x->v[pos] += 0x40000000; + x->v[pos + 1] -= 1; + } + } +} - /* check that 0 mod anything is 0 */ - random_scalar_order_test(&s); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&order, &s); - rustsecp256k1zkp_v0_4_0_scalar_set_int(&s, 0); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n, &s); - rustsecp256k1zkp_v0_4_0_num_mod(&n, &order); - CHECK(rustsecp256k1zkp_v0_4_0_num_is_zero(&n)); - - /* check that anything mod 1 is 0 */ - rustsecp256k1zkp_v0_4_0_scalar_set_int(&s, 1); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&order, &s); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n, &s); - rustsecp256k1zkp_v0_4_0_num_mod(&n, &order); - CHECK(rustsecp256k1zkp_v0_4_0_num_is_zero(&n)); - - /* check that increasing the number past 2^256 does not break this */ - random_scalar_order_test(&s); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n, &s); - /* multiply by 2^8, which'll test this case with high probability */ - for (i = 0; i < 8; ++i) { - rustsecp256k1zkp_v0_4_0_num_add(&n, &n, &n); +/* Test rustsecp256k1zkp_v0_4_0_modinv32{_var}, using inputs in 16-bit limb format, and returning inverse. 
*/ +void test_modinv32_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod) { + uint16_t tmp[16]; + rustsecp256k1zkp_v0_4_0_modinv32_signed30 x; + rustsecp256k1zkp_v0_4_0_modinv32_modinfo m; + int i, vartime, nonzero; + + uint16_to_signed30(&x, in); + nonzero = (x.v[0] | x.v[1] | x.v[2] | x.v[3] | x.v[4] | x.v[5] | x.v[6] | x.v[7] | x.v[8]) != 0; + uint16_to_signed30(&m.modulus, mod); + mutate_sign_signed30(&m.modulus); + + /* compute 1/modulus mod 2^30 */ + m.modulus_inv30 = modinv2p64(m.modulus.v[0]) & 0x3fffffff; + CHECK(((m.modulus_inv30 * m.modulus.v[0]) & 0x3fffffff) == 1); + + for (vartime = 0; vartime < 2; ++vartime) { + /* compute inverse */ + (vartime ? rustsecp256k1zkp_v0_4_0_modinv32_var : rustsecp256k1zkp_v0_4_0_modinv32)(&x, &m); + + /* produce output */ + signed30_to_uint16(out, &x); + + /* check if the inverse times the input is 1 (mod m), unless x is 0. */ + mulmod256(tmp, out, in, mod); + CHECK(tmp[0] == nonzero); + for (i = 1; i < 16; ++i) CHECK(tmp[i] == 0); + + /* invert again */ + (vartime ? rustsecp256k1zkp_v0_4_0_modinv32_var : rustsecp256k1zkp_v0_4_0_modinv32)(&x, &m); + + /* check if the result is equal to the input */ + signed30_to_uint16(tmp, &x); + for (i = 0; i < 16; ++i) CHECK(tmp[i] == in[i]); + } +} + +#ifdef SECP256K1_WIDEMUL_INT128 +/* Convert a 256-bit number represented as 16 uint16_t's to signed62 notation. 
*/ +void uint16_to_signed62(rustsecp256k1zkp_v0_4_0_modinv64_signed62* out, const uint16_t* in) { + int i; + memset(out->v, 0, sizeof(out->v)); + for (i = 0; i < 256; ++i) { + out->v[i / 62] |= (int64_t)(((in[i >> 4]) >> (i & 15)) & 1) << (i % 62); } - rustsecp256k1zkp_v0_4_0_num_mod(&n, &order); - CHECK(rustsecp256k1zkp_v0_4_0_num_is_zero(&n)); } -void test_num_jacobi(void) { - rustsecp256k1zkp_v0_4_0_scalar sqr; - rustsecp256k1zkp_v0_4_0_scalar small; - rustsecp256k1zkp_v0_4_0_scalar five; /* five is not a quadratic residue */ - rustsecp256k1zkp_v0_4_0_num order, n; +/* Convert a 256-bit number in signed62 notation to a representation as 16 uint16_t's. */ +void signed62_to_uint16(uint16_t* out, const rustsecp256k1zkp_v0_4_0_modinv64_signed62* in) { int i; - /* squares mod 5 are 1, 4 */ - const int jacobi5[10] = { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 }; + memset(out, 0, 32); + for (i = 0; i < 256; ++i) { + out[i >> 4] |= (((in->v[i / 62]) >> (i % 62)) & 1) << (i & 15); + } +} - /* check some small values with 5 as the order */ - rustsecp256k1zkp_v0_4_0_scalar_set_int(&five, 5); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&order, &five); - for (i = 0; i < 10; ++i) { - rustsecp256k1zkp_v0_4_0_scalar_set_int(&small, i); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n, &small); - CHECK(rustsecp256k1zkp_v0_4_0_num_jacobi(&n, &order) == jacobi5[i]); +/* Randomly mutate the sign of limbs in signed62 representation, without changing the value. 
*/ +void mutate_sign_signed62(rustsecp256k1zkp_v0_4_0_modinv64_signed62* x) { + static const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + int i; + for (i = 0; i < 8; ++i) { + int pos = rustsecp256k1zkp_v0_4_0_testrand_int(4); + if (x->v[pos] > 0 && x->v[pos + 1] <= M62) { + x->v[pos] -= (M62 + 1); + x->v[pos + 1] += 1; + } else if (x->v[pos] < 0 && x->v[pos + 1] >= -M62) { + x->v[pos] += (M62 + 1); + x->v[pos + 1] -= 1; + } } +} - /** test large values with 5 as group order */ - rustsecp256k1zkp_v0_4_0_scalar_get_num(&order, &five); - /* we first need a scalar which is not a multiple of 5 */ - do { - rustsecp256k1zkp_v0_4_0_num fiven; - random_scalar_order_test(&sqr); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&fiven, &five); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n, &sqr); - rustsecp256k1zkp_v0_4_0_num_mod(&n, &fiven); - } while (rustsecp256k1zkp_v0_4_0_num_is_zero(&n)); - /* next force it to be a residue. 2 is a nonresidue mod 5 so we can - * just multiply by two, i.e. add the number to itself */ - if (rustsecp256k1zkp_v0_4_0_num_jacobi(&n, &order) == -1) { - rustsecp256k1zkp_v0_4_0_num_add(&n, &n, &n); - } - - /* test residue */ - CHECK(rustsecp256k1zkp_v0_4_0_num_jacobi(&n, &order) == 1); - /* test nonresidue */ - rustsecp256k1zkp_v0_4_0_num_add(&n, &n, &n); - CHECK(rustsecp256k1zkp_v0_4_0_num_jacobi(&n, &order) == -1); - - /** test with secp group order as order */ - rustsecp256k1zkp_v0_4_0_scalar_order_get_num(&order); - random_scalar_order_test(&sqr); - rustsecp256k1zkp_v0_4_0_scalar_sqr(&sqr, &sqr); - /* test residue */ - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n, &sqr); - CHECK(rustsecp256k1zkp_v0_4_0_num_jacobi(&n, &order) == 1); - /* test nonresidue */ - rustsecp256k1zkp_v0_4_0_scalar_mul(&sqr, &sqr, &five); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n, &sqr); - CHECK(rustsecp256k1zkp_v0_4_0_num_jacobi(&n, &order) == -1); - /* test multiple of the order*/ - CHECK(rustsecp256k1zkp_v0_4_0_num_jacobi(&order, &order) == 0); - - /* check one less than the 
order */ - rustsecp256k1zkp_v0_4_0_scalar_set_int(&small, 1); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&n, &small); - rustsecp256k1zkp_v0_4_0_num_sub(&n, &order, &n); - CHECK(rustsecp256k1zkp_v0_4_0_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */ -} - -void run_num_smalltests(void) { +/* Test rustsecp256k1zkp_v0_4_0_modinv64{_var}, using inputs in 16-bit limb format, and returning inverse. */ +void test_modinv64_uint16(uint16_t* out, const uint16_t* in, const uint16_t* mod) { + static const int64_t M62 = (int64_t)(UINT64_MAX >> 2); + uint16_t tmp[16]; + rustsecp256k1zkp_v0_4_0_modinv64_signed62 x; + rustsecp256k1zkp_v0_4_0_modinv64_modinfo m; + int i, vartime, nonzero; + + uint16_to_signed62(&x, in); + nonzero = (x.v[0] | x.v[1] | x.v[2] | x.v[3] | x.v[4]) != 0; + uint16_to_signed62(&m.modulus, mod); + mutate_sign_signed62(&m.modulus); + + /* compute 1/modulus mod 2^62 */ + m.modulus_inv62 = modinv2p64(m.modulus.v[0]) & M62; + CHECK(((m.modulus_inv62 * m.modulus.v[0]) & M62) == 1); + + for (vartime = 0; vartime < 2; ++vartime) { + /* compute inverse */ + (vartime ? rustsecp256k1zkp_v0_4_0_modinv64_var : rustsecp256k1zkp_v0_4_0_modinv64)(&x, &m); + + /* produce output */ + signed62_to_uint16(out, &x); + + /* check if the inverse times the input is 1 (mod m), unless x is 0. */ + mulmod256(tmp, out, in, mod); + CHECK(tmp[0] == nonzero); + for (i = 1; i < 16; ++i) CHECK(tmp[i] == 0); + + /* invert again */ + (vartime ? 
rustsecp256k1zkp_v0_4_0_modinv64_var : rustsecp256k1zkp_v0_4_0_modinv64)(&x, &m); + + /* check if the result is equal to the input */ + signed62_to_uint16(tmp, &x); + for (i = 0; i < 16; ++i) CHECK(tmp[i] == in[i]); + } +} +#endif + +/* test if a and b are coprime */ +int coprime(const uint16_t* a, const uint16_t* b) { + uint16_t x[16], y[16], t[16]; int i; - for (i = 0; i < 100*count; i++) { - test_num_negate(); - test_num_add_sub(); - test_num_mod(); - test_num_jacobi(); + int iszero; + memcpy(x, a, 32); + memcpy(y, b, 32); + + /* simple gcd loop: while x!=0, (x,y)=(y%x,x) */ + while (1) { + iszero = 1; + for (i = 0; i < 16; ++i) { + if (x[i] != 0) { + iszero = 0; + break; + } + } + if (iszero) break; + mulmod256(t, y, NULL, x); + memcpy(y, x, 32); + memcpy(x, t, 32); + } + + /* return whether y=1 */ + if (y[0] != 1) return 0; + for (i = 1; i < 16; ++i) { + if (y[i] != 0) return 0; } + return 1; } + +void run_modinv_tests(void) { + /* Fixed test cases. Each tuple is (input, modulus, output), each as 16x16 bits in LE order. */ + static const uint16_t CASES[][3][16] = { + /* Test cases triggering edge cases in divsteps */ + + /* Test case known to need 713 divsteps */ + {{0x1513, 0x5389, 0x54e9, 0x2798, 0x1957, 0x66a0, 0x8057, 0x3477, + 0x7784, 0x1052, 0x326a, 0x9331, 0x6506, 0xa95c, 0x91f3, 0xfb5e}, + {0x2bdd, 0x8df4, 0xcc61, 0x481f, 0xdae5, 0x5ca7, 0xf43b, 0x7d54, + 0x13d6, 0x469b, 0x2294, 0x20f4, 0xb2a4, 0xa2d1, 0x3ff1, 0xfd4b}, + {0xffd8, 0xd9a0, 0x456e, 0x81bb, 0xbabd, 0x6cea, 0x6dbd, 0x73ab, + 0xbb94, 0x3d3c, 0xdf08, 0x31c4, 0x3e32, 0xc179, 0x2486, 0xb86b}}, + /* Test case known to need 589 divsteps, reaching delta=-140 and + delta=141. 
*/ + {{0x3fb1, 0x903b, 0x4eb7, 0x4813, 0xd863, 0x26bf, 0xd89f, 0xa8a9, + 0x02fe, 0x57c6, 0x554a, 0x4eab, 0x165e, 0x3d61, 0xee1e, 0x456c}, + {0x9295, 0x823b, 0x5c1f, 0x5386, 0x48e0, 0x02ff, 0x4c2a, 0xa2da, + 0xe58f, 0x967c, 0xc97e, 0x3f5a, 0x69fb, 0x52d9, 0x0a86, 0xb4a3}, + {0x3d30, 0xb893, 0xa809, 0xa7a8, 0x26f5, 0x5b42, 0x55be, 0xf4d0, + 0x12c2, 0x7e6a, 0xe41a, 0x90c7, 0xebfa, 0xf920, 0x304e, 0x1419}}, + /* Test case known to need 650 divsteps, and doing 65 consecutive (f,g/2) steps. */ + {{0x8583, 0x5058, 0xbeae, 0xeb69, 0x48bc, 0x52bb, 0x6a9d, 0xcc94, + 0x2a21, 0x87d5, 0x5b0d, 0x42f6, 0x5b8a, 0x2214, 0xe9d6, 0xa040}, + {0x7531, 0x27cb, 0x7e53, 0xb739, 0x6a5f, 0x83f5, 0xa45c, 0xcb1d, + 0x8a87, 0x1c9c, 0x51d7, 0x851c, 0xb9d8, 0x1fbe, 0xc241, 0xd4a3}, + {0xcdb4, 0x275c, 0x7d22, 0xa906, 0x0173, 0xc054, 0x7fdf, 0x5005, + 0x7fb8, 0x9059, 0xdf51, 0x99df, 0x2654, 0x8f6e, 0x070f, 0xb347}}, + /* example needing 713 divsteps; delta=-2..3 */ + {{0xe2e9, 0xee91, 0x4345, 0xe5ad, 0xf3ec, 0x8f42, 0x0364, 0xd5c9, + 0xff49, 0xbef5, 0x4544, 0x4c7c, 0xae4b, 0xfd9d, 0xb35b, 0xda9d}, + {0x36e7, 0x8cca, 0x2ed0, 0x47b3, 0xaca4, 0xb374, 0x7d2a, 0x0772, + 0x6bdb, 0xe0a7, 0x900b, 0xfe10, 0x788c, 0x6f22, 0xd909, 0xf298}, + {0xd8c6, 0xba39, 0x13ed, 0x198c, 0x16c8, 0xb837, 0xa5f2, 0x9797, + 0x0113, 0x882a, 0x15b5, 0x324c, 0xabee, 0xe465, 0x8170, 0x85ac}}, + /* example needing 713 divsteps; delta=-2..3 */ + {{0xd5b7, 0x2966, 0x040e, 0xf59a, 0x0387, 0xd96d, 0xbfbc, 0xd850, + 0x2d96, 0x872a, 0xad81, 0xc03c, 0xbb39, 0xb7fa, 0xd904, 0xef78}, + {0x6279, 0x4314, 0xfdd3, 0x1568, 0x0982, 0x4d13, 0x625f, 0x010c, + 0x22b1, 0x0cc3, 0xf22d, 0x5710, 0x1109, 0x5751, 0x7714, 0xfcf2}, + {0xdb13, 0x5817, 0x232e, 0xe456, 0xbbbc, 0x6fbe, 0x4572, 0xa358, + 0xc76d, 0x928e, 0x0162, 0x5314, 0x8325, 0x5683, 0xe21b, 0xda88}}, + /* example needing 713 divsteps; delta=-2..3 */ + {{0xa06f, 0x71ee, 0x3bac, 0x9ebb, 0xdeaa, 0x09ed, 0x1cf7, 0x9ec9, + 0x7158, 0x8b72, 0x5d53, 0x5479, 0x5c75, 0xbb66, 0x9125, 0xeccc}, + 
{0x2941, 0xd46c, 0x3cd4, 0x4a9d, 0x5c4a, 0x256b, 0xbd6c, 0x9b8e, + 0x8fe0, 0x8a14, 0xffe8, 0x2496, 0x618d, 0xa9d7, 0x5018, 0xfb29}, + {0x437c, 0xbd60, 0x7590, 0x94bb, 0x0095, 0xd35e, 0xd4fe, 0xd6da, + 0x0d4e, 0x5342, 0x4cd2, 0x169b, 0x661c, 0x1380, 0xed2d, 0x85c1}}, + /* example reaching delta=-64..65; 661 divsteps */ + {{0xfde4, 0x68d6, 0x6c48, 0x7f77, 0x1c78, 0x96de, 0x2fd9, 0xa6c2, + 0xbbb5, 0xd319, 0x69cf, 0xd4b3, 0xa321, 0xcda0, 0x172e, 0xe530}, + {0xd9e3, 0x0f60, 0x3d86, 0xeeab, 0x25ee, 0x9582, 0x2d50, 0xfe16, + 0xd4e2, 0xe3ba, 0x94e2, 0x9833, 0x6c5e, 0x8982, 0x13b6, 0xe598}, + {0xe675, 0xf55a, 0x10f6, 0xabde, 0x5113, 0xecaa, 0x61ae, 0xad9f, + 0x0c27, 0xef33, 0x62e5, 0x211d, 0x08fa, 0xa78d, 0xc675, 0x8bae}}, + /* example reaching delta=-64..65; 661 divsteps */ + {{0x21bf, 0x52d5, 0x8fd4, 0xaa18, 0x156a, 0x7247, 0xebb8, 0x5717, + 0x4eb5, 0x1421, 0xb58f, 0x3b0b, 0x5dff, 0xe533, 0xb369, 0xd28a}, + {0x9f6b, 0xe463, 0x2563, 0xc74d, 0x6d81, 0x636a, 0x8fc8, 0x7a94, + 0x9429, 0x1585, 0xf35e, 0x7ff5, 0xb64f, 0x9720, 0xba74, 0xe108}, + {0xa5ab, 0xea7b, 0xfe5e, 0x8a85, 0x13be, 0x7934, 0xe8a0, 0xa187, + 0x86b5, 0xe477, 0xb9a4, 0x75d7, 0x538f, 0xdd70, 0xc781, 0xb67d}}, + /* example reaching delta=-64..65; 661 divsteps */ + {{0xa41a, 0x3e8d, 0xf1f5, 0x9493, 0x868c, 0x5103, 0x2725, 0x3ceb, + 0x6032, 0x3624, 0xdc6b, 0x9120, 0xbf4c, 0x8821, 0x91ad, 0xb31a}, + {0x5c0b, 0xdda5, 0x20f8, 0x32a1, 0xaf73, 0x6ec5, 0x4779, 0x43d6, + 0xd454, 0x9573, 0xbf84, 0x5a58, 0xe04e, 0x307e, 0xd1d5, 0xe230}, + {0xda15, 0xbcd6, 0x7180, 0xabd3, 0x04e6, 0x6986, 0xc0d7, 0x90bb, + 0x3a4d, 0x7c95, 0xaaab, 0x9ab3, 0xda34, 0xa7f6, 0x9636, 0x6273}}, + /* example doing 123 consecutive (f,g/2) steps; 615 divsteps */ + {{0xb4d6, 0xb38f, 0x00aa, 0xebda, 0xd4c2, 0x70b8, 0x9dad, 0x58ee, + 0x68f8, 0x48d3, 0xb5ff, 0xf422, 0x9e46, 0x2437, 0x18d0, 0xd9cc}, + {0x5c83, 0xfed7, 0x97f5, 0x3f07, 0xcaad, 0x95b1, 0xb4a4, 0xb005, + 0x23af, 0xdd27, 0x6c0d, 0x932c, 0xe2b2, 0xe3ae, 0xfb96, 0xdf67}, + {0x3105, 0x0127, 0xfd48, 
0x039b, 0x35f1, 0xbc6f, 0x6c0a, 0xb572, + 0xe4df, 0xebad, 0x8edc, 0xb89d, 0x9555, 0x4c26, 0x1fef, 0x997c}}, + /* example doing 123 consecutive (f,g/2) steps; 614 divsteps */ + {{0x5138, 0xd474, 0x385f, 0xc964, 0x00f2, 0x6df7, 0x862d, 0xb185, + 0xb264, 0xe9e1, 0x466c, 0xf39e, 0xafaf, 0x5f41, 0x47e2, 0xc89d}, + {0x8607, 0x9c81, 0x46a2, 0x7dcc, 0xcb0c, 0x9325, 0xe149, 0x2bde, + 0x6632, 0x2869, 0xa261, 0xb163, 0xccee, 0x22ae, 0x91e0, 0xcfd5}, + {0x831c, 0xda22, 0xb080, 0xba7a, 0x26e2, 0x54b0, 0x073b, 0x5ea0, + 0xed4b, 0xcb3d, 0xbba1, 0xbec8, 0xf2ad, 0xae0d, 0x349b, 0x17d1}}, + /* example doing 123 consecutive (f,g/2) steps; 614 divsteps */ + {{0xe9a5, 0xb4ad, 0xd995, 0x9953, 0xcdff, 0x50d7, 0xf715, 0x9dc7, + 0x3e28, 0x15a9, 0x95a3, 0x8554, 0x5b5e, 0xad1d, 0x6d57, 0x3d50}, + {0x3ad9, 0xbd60, 0x5cc7, 0x6b91, 0xadeb, 0x71f6, 0x7cc4, 0xa58a, + 0x2cce, 0xf17c, 0x38c9, 0x97ed, 0x65fb, 0x3fa6, 0xa6bc, 0xeb24}, + {0xf96c, 0x1963, 0x8151, 0xa0cc, 0x299b, 0xf277, 0x001a, 0x16bb, + 0xfd2e, 0x532d, 0x0410, 0xe117, 0x6b00, 0x44ec, 0xca6a, 0x1745}}, + /* example doing 446 (f,g/2) steps; 523 divsteps */ + {{0x3758, 0xa56c, 0xe41e, 0x4e47, 0x0975, 0xa82b, 0x107c, 0x89cf, + 0x2093, 0x5a0c, 0xda37, 0xe007, 0x6074, 0x4f68, 0x2f5a, 0xbb8a}, + {0x4beb, 0xa40f, 0x2c42, 0xd9d6, 0x97e8, 0xca7c, 0xd395, 0x894f, + 0x1f50, 0x8067, 0xa233, 0xb850, 0x1746, 0x1706, 0xbcda, 0xdf32}, + {0x762a, 0xceda, 0x4c45, 0x1ca0, 0x8c37, 0xd8c5, 0xef57, 0x7a2c, + 0x6e98, 0xe38a, 0xc50e, 0x2ca9, 0xcb85, 0x24d5, 0xc29c, 0x61f6}}, + /* example doing 446 (f,g/2) steps; 523 divsteps */ + {{0x6f38, 0x74ad, 0x7332, 0x4073, 0x6521, 0xb876, 0xa370, 0xa6bd, + 0xcea5, 0xbd06, 0x969f, 0x77c6, 0x1e69, 0x7c49, 0x7d51, 0xb6e7}, + {0x3f27, 0x4be4, 0xd81e, 0x1396, 0xb21f, 0x92aa, 0x6dc3, 0x6283, + 0x6ada, 0x3ca2, 0xc1e5, 0x8b9b, 0xd705, 0x5598, 0x8ba1, 0xe087}, + {0x6a22, 0xe834, 0xbc8d, 0xcee9, 0x42fc, 0xfc77, 0x9c45, 0x1ca8, + 0xeb66, 0xed74, 0xaaf9, 0xe75f, 0xfe77, 0x46d2, 0x179b, 0xbf3e}}, + /* example doing 336 (f,(f+g)/2) 
steps; 693 divsteps */ + {{0x7ea7, 0x444e, 0x84ea, 0xc447, 0x7c1f, 0xab97, 0x3de6, 0x5878, + 0x4e8b, 0xc017, 0x03e0, 0xdc40, 0xbbd0, 0x74ce, 0x0169, 0x7ab5}, + {0x4023, 0x154f, 0xfbe4, 0x8195, 0xfda0, 0xef54, 0x9e9a, 0xc703, + 0x2803, 0xf760, 0x6302, 0xed5b, 0x7157, 0x6456, 0xdd7d, 0xf14b}, + {0xb6fb, 0xe3b3, 0x0733, 0xa77e, 0x44c5, 0x3003, 0xc937, 0xdd4d, + 0x5355, 0x14e9, 0x184e, 0xcefe, 0xe6b5, 0xf2e0, 0x0a28, 0x5b74}}, + /* example doing 336 (f,(f+g)/2) steps; 687 divsteps */ + {{0xa893, 0xb5f4, 0x1ede, 0xa316, 0x242c, 0xbdcc, 0xb017, 0x0836, + 0x3a37, 0x27fb, 0xfb85, 0x251e, 0xa189, 0xb15d, 0xa4b8, 0xc24c}, + {0xb0b7, 0x57ba, 0xbb6d, 0x9177, 0xc896, 0xc7f2, 0x43b4, 0x85a6, + 0xe6c4, 0xe50e, 0x3109, 0x7ca5, 0xd73d, 0x13ff, 0x0c3d, 0xcd62}, + {0x48ca, 0xdb34, 0xe347, 0x2cef, 0x4466, 0x10fb, 0x7ee1, 0x6344, + 0x4308, 0x966d, 0xd4d1, 0xb099, 0x994f, 0xd025, 0x2187, 0x5866}}, + /* example doing 267 (g,(g-f)/2) steps; 678 divsteps */ + {{0x0775, 0x1754, 0x01f6, 0xdf37, 0xc0be, 0x8197, 0x072f, 0x6cf5, + 0x8b36, 0x8069, 0x5590, 0xb92d, 0x6084, 0x47a4, 0x23fe, 0xddd5}, + {0x8e1b, 0xda37, 0x27d9, 0x312e, 0x3a2f, 0xef6d, 0xd9eb, 0x8153, + 0xdcba, 0x9fa3, 0x9f80, 0xead5, 0x134d, 0x2ebb, 0x5ec0, 0xe032}, + {0x1cb6, 0x5a61, 0x1bed, 0x77d6, 0xd5d1, 0x7498, 0xef33, 0x2dd2, + 0x1089, 0xedbd, 0x6958, 0x16ae, 0x336c, 0x45e6, 0x4361, 0xbadc}}, + /* example doing 267 (g,(g-f)/2) steps; 676 divsteps */ + {{0x0207, 0xf948, 0xc430, 0xf36b, 0xf0a7, 0x5d36, 0x751f, 0x132c, + 0x6f25, 0xa630, 0xca1f, 0xc967, 0xaf9c, 0x34e7, 0xa38f, 0xbe9f}, + {0x5fb9, 0x7321, 0x6561, 0x5fed, 0x54ec, 0x9c3a, 0xee0e, 0x6717, + 0x49af, 0xb896, 0xf4f5, 0x451c, 0x722a, 0xf116, 0x64a9, 0xcf0b}, + {0xf4d7, 0xdb47, 0xfef2, 0x4806, 0x4cb8, 0x18c7, 0xd9a7, 0x4951, + 0x14d8, 0x5c3a, 0xd22d, 0xd7b2, 0x750c, 0x3de7, 0x8b4a, 0x19aa}}, + + /* Test cases triggering edge cases in divsteps variant starting with delta=1/2 */ + + /* example needing 590 divsteps; delta=-5/2..7/2 */ + {{0x9118, 0xb640, 0x53d7, 0x30ab, 0x2a23, 
0xd907, 0x9323, 0x5b3a, + 0xb6d4, 0x538a, 0x7637, 0xfe97, 0xfd05, 0x3cc0, 0x453a, 0xfb7e}, + {0x6983, 0x4f75, 0x4ad1, 0x48ad, 0xb2d9, 0x521d, 0x3dbc, 0x9cc0, + 0x4b60, 0x0ac6, 0xd3be, 0x0fb6, 0xd305, 0x3895, 0x2da5, 0xfdf8}, + {0xcec1, 0x33ac, 0xa801, 0x8194, 0xe36c, 0x65ef, 0x103b, 0xca54, + 0xfa9b, 0xb41d, 0x9b52, 0xb6f7, 0xa611, 0x84aa, 0x3493, 0xbf54}}, + /* example needing 590 divsteps; delta=-3/2..5/2 */ + {{0xb5f2, 0x42d0, 0x35e8, 0x8ca0, 0x4b62, 0x6e1d, 0xbdf3, 0x890e, + 0x8c82, 0x23d8, 0xc79a, 0xc8e8, 0x789e, 0x353d, 0x9766, 0xea9d}, + {0x6fa1, 0xacba, 0x4b7a, 0x5de1, 0x95d0, 0xc845, 0xebbf, 0x6f5a, + 0x30cf, 0x52db, 0x69b7, 0xe278, 0x4b15, 0x8411, 0x2ab2, 0xf3e7}, + {0xf12c, 0x9d6d, 0x95fa, 0x1878, 0x9f13, 0x4fb5, 0x3c8b, 0xa451, + 0x7182, 0xc4b6, 0x7e2a, 0x7bb7, 0x6e0e, 0x5b68, 0xde55, 0x9927}}, + /* example needing 590 divsteps; delta=-3/2..5/2 */ + {{0x229c, 0x4ef8, 0x1e93, 0xe5dc, 0xcde5, 0x6d62, 0x263b, 0xad11, + 0xced0, 0x88ff, 0xae8e, 0x3183, 0x11d2, 0xa50b, 0x350d, 0xeb40}, + {0x3157, 0xe2ea, 0x8a02, 0x0aa3, 0x5ae1, 0xb26c, 0xea27, 0x6805, + 0x87e2, 0x9461, 0x37c1, 0x2f8d, 0x85d2, 0x77a8, 0xf805, 0xeec9}, + {0x6f4e, 0x2748, 0xf7e5, 0xd8d3, 0xabe2, 0x7270, 0xc4e0, 0xedc7, + 0xf196, 0x78ca, 0x9139, 0xd8af, 0x72c6, 0xaf2f, 0x85d2, 0x6cd3}}, + /* example needing 590 divsteps; delta=-5/2..7/2 */ + {{0xdce8, 0xf1fe, 0x6708, 0x021e, 0xf1ca, 0xd609, 0x5443, 0x85ce, + 0x7a05, 0x8f9c, 0x90c3, 0x52e7, 0x8e1d, 0x97b8, 0xc0bf, 0xf2a1}, + {0xbd3d, 0xed11, 0x1625, 0xb4c5, 0x844c, 0xa413, 0x2569, 0xb9ba, + 0xcd35, 0xff84, 0xcd6e, 0x7f0b, 0x7d5d, 0x10df, 0x3efe, 0xfbe5}, + {0xa9dd, 0xafef, 0xb1b7, 0x4c8d, 0x50e4, 0xafbf, 0x2d5a, 0xb27c, + 0x0653, 0x66b6, 0x5d36, 0x4694, 0x7e35, 0xc47c, 0x857f, 0x32c5}}, + /* example needing 590 divsteps; delta=-3/2..5/2 */ + {{0x7902, 0xc9f8, 0x926b, 0xaaeb, 0x90f8, 0x1c89, 0xcce3, 0x96b7, + 0x28b2, 0x87a2, 0x136d, 0x695a, 0xa8df, 0x9061, 0x9e31, 0xee82}, + {0xd3a9, 0x3c02, 0x818c, 0x6b81, 0x34b3, 0xebbb, 0xe2c8, 0x7712, + 0xbfd6, 
0x8248, 0xa6f4, 0xba6f, 0x03bb, 0xfb54, 0x7575, 0xfe89}, + {0x8246, 0x0d63, 0x478e, 0xf946, 0xf393, 0x0451, 0x08c2, 0x5919, + 0x5fd6, 0x4c61, 0xbeb7, 0x9a15, 0x30e1, 0x55fc, 0x6a01, 0x3724}}, + /* example reaching delta=-127/2..129/2; 571 divsteps */ + {{0x3eff, 0x926a, 0x77f5, 0x1fff, 0x1a5b, 0xf3ef, 0xf64b, 0x8681, + 0xf800, 0xf9bc, 0x761d, 0xe268, 0x62b0, 0xa032, 0xba9c, 0xbe56}, + {0xb8f9, 0x00e7, 0x47b7, 0xdffc, 0xfd9d, 0x5abb, 0xa19b, 0x1868, + 0x31fd, 0x3b29, 0x3674, 0x5449, 0xf54d, 0x1d19, 0x6ac7, 0xff6f}, + {0xf1d7, 0x3551, 0x5682, 0x9adf, 0xe8aa, 0x19a5, 0x8340, 0x71db, + 0xb7ab, 0x4cfd, 0xf661, 0x632c, 0xc27e, 0xd3c6, 0xdf42, 0xd306}}, + /* example reaching delta=-127/2..129/2; 571 divsteps */ + {{0x0000, 0x0000, 0x0000, 0x0000, 0x3aff, 0x2ed7, 0xf2e0, 0xabc7, + 0x8aee, 0x166e, 0x7ed0, 0x9ac7, 0x714a, 0xb9c5, 0x4d58, 0xad6c}, + {0x9cf9, 0x47e2, 0xa421, 0xb277, 0xffc2, 0x2747, 0x6486, 0x94c1, + 0x1d99, 0xd49b, 0x1096, 0x991a, 0xe986, 0xae02, 0xe89b, 0xea36}, + {0x1fb4, 0x98d8, 0x19b7, 0x80e9, 0xcdac, 0xaa5a, 0xf1e6, 0x0074, + 0xe393, 0xed8b, 0x8d5c, 0xe17d, 0x81b3, 0xc16d, 0x54d3, 0x9be3}}, + /* example reaching delta=-127/2..129/2; 571 divsteps */ + {{0xd047, 0x7e36, 0x3157, 0x7ab6, 0xb4d9, 0x8dae, 0x7534, 0x4f5d, + 0x489e, 0xa8ab, 0x8a3d, 0xd52c, 0x62af, 0xa032, 0xba9c, 0xbe56}, + {0xb1f1, 0x737f, 0x5964, 0x5afb, 0x3712, 0x8ef9, 0x19f7, 0x9669, + 0x664d, 0x03ad, 0xc352, 0xf7a5, 0xf545, 0x1d19, 0x6ac7, 0xff6f}, + {0xa834, 0x5256, 0x27bc, 0x33bd, 0xba11, 0x5a7b, 0x791e, 0xe6c0, + 0x9ac4, 0x9370, 0x1130, 0x28b4, 0x2b2e, 0x231b, 0x082a, 0x796e}}, + /* example doing 123 consecutive (f,g/2) steps; 554 divsteps */ + {{0x6ab1, 0x6ea0, 0x1a99, 0xe0c2, 0xdd45, 0x645d, 0x8dbc, 0x466a, + 0xfa64, 0x4289, 0xd3f7, 0xfc8f, 0x2894, 0xe3c5, 0xa008, 0xcc14}, + {0xc75f, 0xc083, 0x4cc2, 0x64f2, 0x2aff, 0x4c12, 0x8461, 0xc4ae, + 0xbbfa, 0xb336, 0xe4b2, 0x3ac5, 0x2c22, 0xf56c, 0x5381, 0xe943}, + {0xcd80, 0x760d, 0x4395, 0xb3a6, 0xd497, 0xf583, 0x82bd, 0x1daa, + 0xbe92, 
0x2613, 0xfdfb, 0x869b, 0x0425, 0xa333, 0x7056, 0xc9c5}}, + /* example doing 123 consecutive (f,g/2) steps; 554 divsteps */ + {{0x71d4, 0x64df, 0xec4f, 0x74d8, 0x7e0c, 0x40d3, 0x7073, 0x4cc8, + 0x2a2a, 0xb1ff, 0x8518, 0x6513, 0xb0ea, 0x640a, 0x62d9, 0xd5f4}, + {0xdc75, 0xd937, 0x3b13, 0x1d36, 0xdf83, 0xd034, 0x1c1c, 0x4332, + 0x4cc3, 0xeeec, 0x7d94, 0x6771, 0x3384, 0x74b0, 0x947d, 0xf2c4}, + {0x0a82, 0x37a4, 0x12d5, 0xec97, 0x972c, 0xe6bf, 0xc348, 0xa0a9, + 0xc50c, 0xdc7c, 0xae30, 0x19d1, 0x0fca, 0x35e1, 0xd6f6, 0x81ee}}, + /* example doing 123 consecutive (f,g/2) steps; 554 divsteps */ + {{0xa6b1, 0xabc5, 0x5bbc, 0x7f65, 0xdd32, 0xaa73, 0xf5a3, 0x1982, + 0xced4, 0xe949, 0x0fd6, 0x2bc4, 0x2bd7, 0xe3c5, 0xa008, 0xcc14}, + {0x4b5f, 0x8f96, 0xa375, 0xfbcf, 0x1c7d, 0xf1ec, 0x03f5, 0xb35d, + 0xb999, 0xdb1f, 0xc9a1, 0xb4c7, 0x1dd5, 0xf56c, 0x5381, 0xe943}, + {0xaa3d, 0x38b9, 0xf17d, 0xeed9, 0x9988, 0x69ee, 0xeb88, 0x1495, + 0x203f, 0x18c8, 0x82b7, 0xdcb2, 0x34a7, 0x6b00, 0x6998, 0x589a}}, + /* example doing 453 (f,g/2) steps; 514 divsteps */ + {{0xa478, 0xe60d, 0x3244, 0x60e6, 0xada3, 0xfe50, 0xb6b1, 0x2eae, + 0xd0ef, 0xa7b1, 0xef63, 0x05c0, 0xe213, 0x443e, 0x4427, 0x2448}, + {0x258f, 0xf9ef, 0xe02b, 0x92dd, 0xd7f3, 0x252b, 0xa503, 0x9089, + 0xedff, 0x96c1, 0xfe3a, 0x3a39, 0x198a, 0x981d, 0x0627, 0xedb7}, + {0x595a, 0x45be, 0x8fb0, 0x2265, 0xc210, 0x02b8, 0xdce9, 0xe241, + 0xcab6, 0xbf0d, 0x0049, 0x8d9a, 0x2f51, 0xae54, 0x5785, 0xb411}}, + /* example doing 453 (f,g/2) steps; 514 divsteps */ + {{0x48f0, 0x7db3, 0xdafe, 0x1c92, 0x5912, 0xe11a, 0xab52, 0xede1, + 0x3182, 0x8980, 0x5d2b, 0x9b5b, 0x8718, 0xda27, 0x1683, 0x1de2}, + {0x168f, 0x6f36, 0xce7a, 0xf435, 0x19d4, 0xda5e, 0x2351, 0x9af5, + 0xb003, 0x0ef5, 0x3b4c, 0xecec, 0xa9f0, 0x78e1, 0xdfef, 0xe823}, + {0x5f55, 0xfdcc, 0xb233, 0x2914, 0x84f0, 0x97d1, 0x9cf4, 0x2159, + 0xbf56, 0xb79c, 0x17a3, 0x7cef, 0xd5de, 0x34f0, 0x5311, 0x4c54}}, + /* example doing 510 (f,(f+g)/2) steps; 512 divsteps */ + {{0x2789, 0x2e04, 0x6e0e, 
0xb6cd, 0xe4de, 0x4dbf, 0x228d, 0x7877, + 0xc335, 0x806b, 0x38cd, 0x8049, 0xa73b, 0xcfa2, 0x82f7, 0x9e19}, + {0xc08d, 0xb99d, 0xb8f3, 0x663d, 0xbbb3, 0x1284, 0x1485, 0x1d49, + 0xc98f, 0x9e78, 0x1588, 0x11e3, 0xd91a, 0xa2c7, 0xfff1, 0xc7b9}, + {0x1e1f, 0x411d, 0x7c49, 0x0d03, 0xe789, 0x2f8e, 0x5d55, 0xa95e, + 0x826e, 0x8de5, 0x52a0, 0x1abc, 0x4cd7, 0xd13a, 0x4395, 0x63e1}}, + /* example doing 510 (f,(f+g)/2) steps; 512 divsteps */ + {{0xd5a1, 0xf786, 0x555c, 0xb14b, 0x44ae, 0x535f, 0x4a49, 0xffc3, + 0xf497, 0x70d1, 0x57c8, 0xa933, 0xc85a, 0x1910, 0x75bf, 0x960b}, + {0xfe53, 0x5058, 0x496d, 0xfdff, 0x6fb8, 0x4100, 0x92bd, 0xe0c4, + 0xda89, 0xe0a4, 0x841b, 0x43d4, 0xa388, 0x957f, 0x99ca, 0x9abf}, + {0xe530, 0x05bc, 0xfeec, 0xfc7e, 0xbcd3, 0x1239, 0x54cb, 0x7042, + 0xbccb, 0x139e, 0x9076, 0x0203, 0x6068, 0x90c7, 0x1ddf, 0x488d}}, + /* example doing 228 (g,(g-f)/2) steps; 538 divsteps */ + {{0x9488, 0xe54b, 0x0e43, 0x81d2, 0x06e7, 0x4b66, 0x36d0, 0x53d6, + 0x2b68, 0x22ec, 0x3fa9, 0xc1a7, 0x9ad2, 0xa596, 0xb3ac, 0xdf42}, + {0xe31f, 0x0b28, 0x5f3b, 0xc1ff, 0x344c, 0xbf5f, 0xd2ec, 0x2936, + 0x9995, 0xdeb2, 0xae6c, 0x2852, 0xa2c6, 0xb306, 0x8120, 0xe305}, + {0xa56e, 0xfb98, 0x1537, 0x4d85, 0x619e, 0x866c, 0x3cd4, 0x779a, + 0xdd66, 0xa80d, 0xdc2f, 0xcae4, 0xc74c, 0x5175, 0xa65d, 0x605e}}, + /* example doing 228 (g,(g-f)/2) steps; 537 divsteps */ + {{0x8cd5, 0x376d, 0xd01b, 0x7176, 0x19ef, 0xcf09, 0x8403, 0x5e52, + 0x83c1, 0x44de, 0xb91e, 0xb33d, 0xe15c, 0x51e7, 0xbad8, 0x6359}, + {0x3b75, 0xf812, 0x5f9e, 0xa04e, 0x92d3, 0x226e, 0x540e, 0x7c9a, + 0x31c6, 0x46d2, 0x0b7b, 0xdb4a, 0xe662, 0x4950, 0x0265, 0xf76f}, + {0x09ed, 0x692f, 0xe8f1, 0x3482, 0xab54, 0x36b4, 0x8442, 0x6ae9, + 0x4329, 0x6505, 0x183b, 0x1c1d, 0x482d, 0x7d63, 0xb44f, 0xcc09}}, + + /* Test cases with the group order as modulus. */ + + /* Test case with the group order as modulus, needing 635 divsteps. 
*/ + {{0x95ed, 0x6c01, 0xd113, 0x5ff1, 0xd7d0, 0x29cc, 0x5817, 0x6120, + 0xca8e, 0xaad1, 0x25ae, 0x8e84, 0x9af6, 0x30bf, 0xf0ed, 0x1686}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x1631, 0xbf4a, 0x286a, 0x2716, 0x469f, 0x2ac8, 0x1312, 0xe9bc, + 0x04f4, 0x304b, 0x9931, 0x113b, 0xd932, 0xc8f4, 0x0d0d, 0x01a1}}, + /* example with group size as modulus needing 631 divsteps */ + {{0x85ed, 0xc284, 0x9608, 0x3c56, 0x19b6, 0xbb5b, 0x2850, 0xdab7, + 0xa7f5, 0xe9ab, 0x06a4, 0x5bbb, 0x1135, 0xa186, 0xc424, 0xc68b}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x8479, 0x450a, 0x8fa3, 0xde05, 0xb2f5, 0x7793, 0x7269, 0xbabb, + 0xc3b3, 0xd49b, 0x3377, 0x03c6, 0xe694, 0xc760, 0xd3cb, 0x2811}}, + /* example with group size as modulus needing 565 divsteps starting at delta=1/2 */ + {{0x8432, 0x5ceb, 0xa847, 0x6f1e, 0x51dd, 0x535a, 0x6ddc, 0x70ce, + 0x6e70, 0xc1f6, 0x18f2, 0x2a7e, 0xc8e7, 0x39f8, 0x7e96, 0xebbf}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x257e, 0x449f, 0x689f, 0x89aa, 0x3989, 0xb661, 0x376c, 0x1e32, + 0x654c, 0xee2e, 0xf4e2, 0x33c8, 0x3f2f, 0x9716, 0x6046, 0xcaa3}}, + /* Test case with the group size as modulus, needing 981 divsteps with + broken eta handling. */ + {{0xfeb9, 0xb877, 0xee41, 0x7fa3, 0x87da, 0x94c4, 0x9d04, 0xc5ae, + 0x5708, 0x0994, 0xfc79, 0x0916, 0xbf32, 0x3ad8, 0xe11c, 0x5ca2}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0f12, 0x075e, 0xce1c, 0x6f92, 0xc80f, 0xca92, 0x9a04, 0x6126, + 0x4b6c, 0x57d6, 0xca31, 0x97f3, 0x1f99, 0xf4fd, 0xda4d, 0x42ce}}, + /* Test case with the group size as modulus, input = 0. 
*/ + {{0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}}, + /* Test case with the group size as modulus, input = 1. */ + {{0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}}, + /* Test case with the group size as modulus, input = 2. */ + {{0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x20a1, 0x681b, 0x2f46, 0xdfe9, 0x501d, 0x57a4, 0x6e73, 0x5d57, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x7fff}}, + /* Test case with the group size as modulus, input = group - 1. */ + {{0x4140, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x4141, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x4140, 0xd036, 0x5e8c, 0xbfd2, 0xa03b, 0xaf48, 0xdce6, 0xbaae, + 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}}, + + /* Test cases with the field size as modulus. */ + + /* Test case with the field size as modulus, needing 637 divsteps. 
*/ + {{0x9ec3, 0x1919, 0xca84, 0x7c11, 0xf996, 0x06f3, 0x5408, 0x6688, + 0x1320, 0xdb8a, 0x632a, 0x0dcb, 0x8a84, 0x6bee, 0x9c95, 0xe34e}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x18e5, 0x19b6, 0xdf92, 0x1aaa, 0x09fb, 0x8a3f, 0x52b0, 0x8701, + 0xac0c, 0x2582, 0xda44, 0x9bcc, 0x6828, 0x1c53, 0xbd8f, 0xbd2c}}, + /* example with field size as modulus needing 637 divsteps */ + {{0xaec3, 0xa7cf, 0x2f2d, 0x0693, 0x5ad5, 0xa8ff, 0x7ec7, 0x30ff, + 0x0c8b, 0xc242, 0xcab2, 0x063a, 0xf86e, 0x6057, 0x9cbd, 0xf6d8}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0310, 0x579d, 0xcb38, 0x9030, 0x3ded, 0x9bb9, 0x1234, 0x63ce, + 0x0c63, 0x8e3d, 0xacfe, 0x3c20, 0xdc85, 0xf859, 0x919e, 0x1d45}}, + /* example with field size as modulus needing 564 divsteps starting at delta=1/2 */ + {{0x63ae, 0x8d10, 0x0071, 0xdb5c, 0xb454, 0x78d1, 0x744a, 0x5f8e, + 0xe4d8, 0x87b1, 0x8e62, 0x9590, 0xcede, 0xa070, 0x36b4, 0x7f6f}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0xfdc8, 0xe8d5, 0xbe15, 0x9f86, 0xa5fe, 0xf18e, 0xa7ff, 0xd291, + 0xf4c2, 0x9c87, 0xf150, 0x073e, 0x69b8, 0xf7c4, 0xee4b, 0xc7e6}}, + /* Test case with the field size as modulus, needing 935 divsteps with + broken eta handling. */ + {{0x1b37, 0xbdc3, 0x8bcd, 0x25e3, 0x1eae, 0x567d, 0x30b6, 0xf0d8, + 0x9277, 0x0cf8, 0x9c2e, 0xecd7, 0x631d, 0xe38f, 0xd4f8, 0x5c93}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x1622, 0xe05b, 0xe880, 0x7de9, 0x3e45, 0xb682, 0xee6c, 0x67ed, + 0xa179, 0x15db, 0x6b0d, 0xa656, 0x7ccb, 0x8ef7, 0xa2ff, 0xe279}}, + /* Test case with the field size as modulus, input = 0. 
*/ + {{0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}}, + /* Test case with the field size as modulus, input = 1. */ + {{0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}}, + /* Test case with the field size as modulus, input = 2. */ + {{0x0002, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0xfe18, 0x7fff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0x7fff}}, + /* Test case with the field size as modulus, input = field - 1. */ + {{0xfc2e, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}, + {0xfc2e, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff}}, + + /* Selected from a large number of random inputs to reach small/large + * d/e values in various configurations. 
*/ + {{0x3a08, 0x23e1, 0x4d8c, 0xe606, 0x3263, 0x67af, 0x9bf1, 0x9d70, + 0xf5fd, 0x12e4, 0x03c8, 0xb9ca, 0xe847, 0x8c5d, 0x6322, 0xbd30}, + {0x8359, 0x59dd, 0x1831, 0x7c1a, 0x1e83, 0xaee1, 0x770d, 0xcea8, + 0xfbb1, 0xeed6, 0x10b5, 0xe2c6, 0x36ea, 0xee17, 0xe32c, 0xffff}, + {0x1727, 0x0f36, 0x6f85, 0x5d0c, 0xca6c, 0x3072, 0x9628, 0x5842, + 0xcb44, 0x7c2b, 0xca4f, 0x62e5, 0x29b1, 0x6ffd, 0x9055, 0xc196}}, + {{0x905d, 0x41c8, 0xa2ff, 0x295b, 0x72bb, 0x4679, 0x6d01, 0x2c98, + 0xb3e0, 0xc537, 0xa310, 0xe07e, 0xe72f, 0x4999, 0x1148, 0xf65e}, + {0x5b41, 0x4239, 0x3c37, 0x5130, 0x30e3, 0xff35, 0xc51f, 0x1a43, + 0xdb23, 0x13cf, 0x9f49, 0xf70c, 0x5e70, 0xd411, 0x3005, 0xf8c6}, + {0xc30e, 0x68f0, 0x201a, 0xe10c, 0x864a, 0x6243, 0xe946, 0x43ae, + 0xf3f1, 0x52dc, 0x1f7f, 0x50d4, 0x2797, 0x064c, 0x5ca4, 0x90e3}}, + {{0xf1b5, 0xc6e5, 0xd2c4, 0xff95, 0x27c5, 0x0c92, 0x5d19, 0x7ae5, + 0x4fbe, 0x5438, 0x99e1, 0x880d, 0xd892, 0xa05c, 0x6ffd, 0x7eac}, + {0x2153, 0xcc9d, 0xfc6c, 0x8358, 0x49a1, 0x01e2, 0xcef0, 0x4969, + 0xd69a, 0x8cef, 0xf5b2, 0xfd95, 0xdcc2, 0x71f4, 0x6ae2, 0xceeb}, + {0x9b2e, 0xcdc6, 0x0a5c, 0x7317, 0x9084, 0xe228, 0x56cf, 0xd512, + 0x628a, 0xce21, 0x3473, 0x4e13, 0x8823, 0x1ed0, 0x34d0, 0xbfa3}}, + {{0x5bae, 0x53e5, 0x5f4d, 0x21ca, 0xb875, 0x8ecf, 0x9aa6, 0xbe3c, + 0x9f96, 0x7b82, 0x375d, 0x4d3e, 0x491c, 0xb1eb, 0x04c9, 0xb6c8}, + {0xfcfd, 0x10b7, 0x73b2, 0xd23b, 0xa357, 0x67da, 0x0d9f, 0x8702, + 0xa037, 0xff8e, 0x0e8b, 0x1801, 0x2c5c, 0x4e6e, 0x4558, 0xfff2}, + {0xc50f, 0x5654, 0x6713, 0x5ef5, 0xa7ce, 0xa647, 0xc832, 0x69ce, + 0x1d5c, 0x4310, 0x0746, 0x5a01, 0x96ea, 0xde4b, 0xa88b, 0x5543}}, + {{0xdc7f, 0x5e8c, 0x89d1, 0xb077, 0xd521, 0xcf90, 0x32fa, 0x5737, + 0x839e, 0x1464, 0x007c, 0x09c6, 0x9371, 0xe8ea, 0xc1cb, 0x75c4}, + {0xe3a3, 0x107f, 0xa82a, 0xa375, 0x4578, 0x60f4, 0x75c9, 0x5ee4, + 0x3fd7, 0x2736, 0x2871, 0xd3d2, 0x5f1d, 0x1abb, 0xa764, 0xffff}, + {0x45c6, 0x1f2e, 0xb14c, 0x84d7, 0x7bb7, 0x5a04, 0x0504, 0x3f33, + 0x5cc1, 0xb07a, 0x6a6c, 0x786f, 0x647f, 
0xe1d7, 0x78a2, 0x4cf4}}, + {{0xc006, 0x356f, 0x8cd2, 0x967b, 0xb49e, 0x2d4e, 0x14bf, 0x4bcb, + 0xddab, 0xd3f9, 0xa068, 0x2c1c, 0xd242, 0xa56d, 0xf2c7, 0x5f97}, + {0x465b, 0xb745, 0x0e0d, 0x69a9, 0x987d, 0xcb37, 0xf637, 0xb311, + 0xc4d6, 0x2ddb, 0xf68f, 0x2af9, 0x959d, 0x3f53, 0x98f2, 0xf640}, + {0xc0f2, 0x6bfb, 0xf5c3, 0x91c1, 0x6b05, 0x0825, 0x5ca0, 0x7df7, + 0x9d55, 0x6d9e, 0xfe94, 0x2ad9, 0xd9f0, 0xe68b, 0xa72b, 0xd1b2}}, + {{0x2279, 0x61ba, 0x5bc6, 0x136b, 0xf544, 0x717c, 0xafda, 0x02bd, + 0x79af, 0x1fad, 0xea09, 0x81bb, 0x932b, 0x32c9, 0xdf1d, 0xe576}, + {0x8215, 0x7817, 0xca82, 0x43b0, 0x9b06, 0xea65, 0x1291, 0x0621, + 0x0089, 0x46fe, 0xc5a6, 0xddd7, 0x8065, 0xc6a0, 0x214b, 0xfc64}, + {0x04bf, 0x6f2a, 0x86b2, 0x841a, 0x4a95, 0xc632, 0x97b7, 0x5821, + 0x2b18, 0x1bb0, 0x3e97, 0x935e, 0xcc7d, 0x066b, 0xd513, 0xc251}}, + {{0x76e8, 0x5bc2, 0x3eaa, 0x04fc, 0x9974, 0x92c1, 0x7c15, 0xfa89, + 0x1151, 0x36ee, 0x48b2, 0x049c, 0x5f16, 0xcee4, 0x925b, 0xe98e}, + {0x913f, 0x0a2d, 0xa185, 0x9fea, 0xda5a, 0x4025, 0x40d7, 0x7cfa, + 0x88ca, 0xbbe8, 0xb265, 0xb7e4, 0x6cb1, 0xed64, 0xc6f9, 0xffb5}, + {0x6ab1, 0x1a86, 0x5009, 0x152b, 0x1cc4, 0xe2c8, 0x960b, 0x19d0, + 0x3554, 0xc562, 0xd013, 0xcf91, 0x10e1, 0x7933, 0xe195, 0xcf49}}, + {{0x9cb5, 0xd2d7, 0xc6ed, 0xa818, 0xb495, 0x06ee, 0x0f4a, 0x06e3, + 0x4c5a, 0x80ce, 0xd49a, 0x4cd7, 0x7487, 0x92af, 0xe516, 0x676c}, + {0xd6e9, 0x6b85, 0x619a, 0xb52c, 0x20a0, 0x2f79, 0x3545, 0x1edd, + 0x5a6f, 0x8082, 0x9b80, 0xf8f8, 0xc78a, 0xd0a3, 0xadf4, 0xffff}, + {0x01c2, 0x2118, 0xef5e, 0xa877, 0x046a, 0xd2c2, 0x2ad5, 0x951c, + 0x8900, 0xa5c9, 0x8d0f, 0x6b61, 0x55d3, 0xd572, 0x48de, 0x9219}}, + {{0x5114, 0x0644, 0x23dd, 0x01d3, 0xc101, 0xa659, 0xea17, 0x640f, + 0xf767, 0x2644, 0x9cec, 0xd8ba, 0xd6da, 0x9156, 0x8aeb, 0x875a}, + {0xc1bf, 0xdae9, 0xe96b, 0xce77, 0xf7a1, 0x3e99, 0x5c2e, 0x973b, + 0xd048, 0x5bd0, 0x4e8a, 0xcb85, 0xce39, 0x37f5, 0x815d, 0xffff}, + {0x48cc, 0x35b6, 0x26d4, 0x2ea6, 0x50d6, 0xa2f9, 0x64b6, 0x03bf, + 0xd00c, 0xe057, 
0x3343, 0xfb79, 0x3ce5, 0xf717, 0xc5af, 0xe185}}, + {{0x13ff, 0x6c76, 0x2077, 0x16e0, 0xd5ca, 0xf2ad, 0x8dba, 0x8f49, + 0x7887, 0x16f9, 0xb646, 0xfc87, 0xfa31, 0x5096, 0xf08c, 0x3fbe}, + {0x8139, 0x6fd7, 0xf6df, 0xa7bf, 0x6699, 0x5361, 0x6f65, 0x13c8, + 0xf4d1, 0xe28f, 0xc545, 0x0a8c, 0x5274, 0xb0a6, 0xffff, 0xffff}, + {0x22ca, 0x0cd6, 0xc1b5, 0xb064, 0x44a7, 0x297b, 0x495f, 0x34ac, + 0xfa95, 0xec62, 0xf08d, 0x621c, 0x66a6, 0xba94, 0x84c6, 0x8ee0}}, + {{0xaa30, 0x312e, 0x439c, 0x4e88, 0x2e2f, 0x32dc, 0xb880, 0xa28e, + 0xf795, 0xc910, 0xb406, 0x8dd7, 0xb187, 0xa5a5, 0x38f1, 0xe49e}, + {0xfb19, 0xf64a, 0xba6a, 0x8ec2, 0x7255, 0xce89, 0x2cf9, 0x9cba, + 0xe1fe, 0x50da, 0x1705, 0xac52, 0xe3d4, 0x4269, 0x0648, 0xfd77}, + {0xb4c8, 0x6e8a, 0x2b5f, 0x4c2d, 0x5a67, 0xa7bb, 0x7d6d, 0x5569, + 0xa0ea, 0x244a, 0xc0f2, 0xf73d, 0x58cf, 0xac7f, 0xd32b, 0x3018}}, + {{0xc953, 0x1ae1, 0xae46, 0x8709, 0x19c2, 0xa986, 0x9abe, 0x1611, + 0x0395, 0xd5ab, 0xf0f6, 0xb5b0, 0x5b2b, 0x0317, 0x80ba, 0x376d}, + {0xfe77, 0xbc03, 0xac2f, 0x9d00, 0xa175, 0x293d, 0x3b56, 0x0e3a, + 0x0a9c, 0xf40c, 0x690e, 0x1508, 0x95d4, 0xddc4, 0xe805, 0xffff}, + {0xb1ce, 0x0929, 0xa5fe, 0x4b50, 0x9d5d, 0x8187, 0x2557, 0x4376, + 0x11ba, 0xdcef, 0xc1f3, 0xd531, 0x1824, 0x93f6, 0xd81f, 0x8f83}}, + {{0xb8d2, 0xb900, 0x4a0c, 0x7188, 0xa5bf, 0x1b0b, 0x2ae5, 0xa35b, + 0x98e0, 0x610c, 0x86db, 0x2487, 0xa267, 0x002c, 0xebb6, 0xc5f4}, + {0x9cdd, 0x1c1b, 0x2f06, 0x43d1, 0xce47, 0xc334, 0x6e60, 0xc016, + 0x989e, 0x0ab2, 0x0cac, 0x1196, 0xe2d9, 0x2e04, 0xc62b, 0xffff}, + {0xdc36, 0x1f05, 0x6aa9, 0x7a20, 0x944f, 0x2fd3, 0xa553, 0xdb4f, + 0xbd5c, 0x3a75, 0x25d4, 0xe20e, 0xa387, 0x1410, 0xdbb1, 0x1b60}}, + {{0x76b3, 0x2207, 0x4930, 0x5dd7, 0x65a0, 0xd55c, 0xb443, 0x53b7, + 0x5c22, 0x818a, 0xb2e7, 0x9de8, 0x9985, 0xed45, 0x33b1, 0x53e8}, + {0x7913, 0x44e1, 0xf15b, 0x5edd, 0x34f3, 0x4eba, 0x0758, 0x7104, + 0x32d9, 0x28f3, 0x4401, 0x85c5, 0xb695, 0xb899, 0xc0f2, 0xffff}, + {0x7f43, 0xd202, 0x24c9, 0x69f3, 0x74dc, 0x1a69, 0xeaee, 
0x5405, + 0x1755, 0x4bb8, 0x04e3, 0x2fd2, 0xada8, 0x39eb, 0x5b4d, 0x96ca}}, + {{0x807b, 0x7112, 0xc088, 0xdafd, 0x02fa, 0x9d95, 0x5e42, 0xc033, + 0xde0a, 0xeecf, 0x8e90, 0x8da1, 0xb17e, 0x9a5b, 0x4c6d, 0x1914}, + {0x4871, 0xd1cb, 0x47d7, 0x327f, 0x09ec, 0x97bb, 0x2fae, 0xd346, + 0x6b78, 0x3707, 0xfeb2, 0xa6ab, 0x13df, 0x76b0, 0x8fb9, 0xffb3}, + {0x179e, 0xb63b, 0x4784, 0x231e, 0x9f42, 0x7f1a, 0xa3fb, 0xdd8c, + 0xd1eb, 0xb4c9, 0x8ca7, 0x018c, 0xf691, 0x576c, 0xa7d6, 0xce27}}, + {{0x5f45, 0x7c64, 0x083d, 0xedd5, 0x08a0, 0x0c64, 0x6c6f, 0xec3c, + 0xe2fb, 0x352c, 0x9303, 0x75e4, 0xb4e0, 0x8b09, 0xaca4, 0x7025}, + {0x1025, 0xb482, 0xfed5, 0xa678, 0x8966, 0x9359, 0x5329, 0x98bb, + 0x85b2, 0x73ba, 0x9982, 0x6fdc, 0xf190, 0xbe8c, 0xdc5c, 0xfd93}, + {0x83a2, 0x87a4, 0xa680, 0x52a1, 0x1ba1, 0x8848, 0x5db7, 0x9744, + 0x409c, 0x0745, 0x0e1e, 0x1cfc, 0x00cd, 0xf573, 0x2071, 0xccaa}}, + {{0xf61f, 0x63d4, 0x536c, 0x9eb9, 0x5ddd, 0xbb11, 0x9014, 0xe904, + 0xfe01, 0x6b45, 0x1858, 0xcb5b, 0x4c38, 0x43e1, 0x381d, 0x7f94}, + {0xf61f, 0x63d4, 0xd810, 0x7ca3, 0x8a04, 0x4b83, 0x11fc, 0xdf94, + 0x4169, 0xbd05, 0x608e, 0x7151, 0x4fbf, 0xb31a, 0x38a7, 0xa29b}, + {0xe621, 0xdfa5, 0x3d06, 0x1d03, 0x81e6, 0x00da, 0x53a6, 0x965e, + 0x93e5, 0x2164, 0x5b61, 0x59b8, 0xa629, 0x8d73, 0x699a, 0x6111}}, + {{0x4cc3, 0xd29e, 0xf4a3, 0x3428, 0x2048, 0xeec9, 0x5f50, 0x99a4, + 0x6de9, 0x05f2, 0x5aa9, 0x5fd2, 0x98b4, 0x1adc, 0x225f, 0x777f}, + {0xe649, 0x37da, 0x5ba6, 0x5765, 0x3f4a, 0x8a1c, 0x2e79, 0xf550, + 0x1a54, 0xcd1e, 0x7218, 0x3c3c, 0x6311, 0xfe28, 0x95fb, 0xed97}, + {0xe9b6, 0x0c47, 0x3f0e, 0x849b, 0x11f8, 0xe599, 0x5e4d, 0xd618, + 0xa06d, 0x33a0, 0x9a3e, 0x44db, 0xded8, 0x10f0, 0x94d2, 0x81fb}}, + {{0x2e59, 0x7025, 0xd413, 0x455a, 0x1ce3, 0xbd45, 0x7263, 0x27f7, + 0x23e3, 0x518e, 0xbe06, 0xc8c4, 0xe332, 0x4276, 0x68b4, 0xb166}, + {0x596f, 0x0cf6, 0xc8ec, 0x787b, 0x04c1, 0x473c, 0xd2b8, 0x8d54, + 0x9cdf, 0x77f2, 0xd3f3, 0x6735, 0x0638, 0xf80e, 0x9467, 0xc6aa}, + {0xc7e7, 0x1822, 0xb62a, 0xec0d, 
0x89cd, 0x7846, 0xbfa2, 0x35d5, + 0xfa38, 0x870f, 0x494b, 0x1697, 0x8b17, 0xf904, 0x10b6, 0x9822}}, + {{0x6d5b, 0x1d4f, 0x0aaf, 0x807b, 0x35fb, 0x7ee8, 0x00c6, 0x059a, + 0xddf0, 0x1fb1, 0xc38a, 0xd78e, 0x2aa4, 0x79e7, 0xad28, 0xc3f1}, + {0xe3bb, 0x174e, 0xe0a8, 0x74b6, 0xbd5b, 0x35f6, 0x6d23, 0x6328, + 0xc11f, 0x83e1, 0xf928, 0xa918, 0x838e, 0xbf43, 0xe243, 0xfffb}, + {0x9cf2, 0x6b8b, 0x3476, 0x9d06, 0xdcf2, 0xdb8a, 0x89cd, 0x4857, + 0x75c2, 0xabb8, 0x490b, 0xc9bd, 0x890e, 0xe36e, 0xd552, 0xfffa}}, + {{0x2f09, 0x9d62, 0xa9fc, 0xf090, 0xd6d1, 0x9d1d, 0x1828, 0xe413, + 0xc92b, 0x3d5a, 0x1373, 0x368c, 0xbaf2, 0x2158, 0x71eb, 0x08a3}, + {0x2f09, 0x1d62, 0x4630, 0x0de1, 0x06dc, 0xf7f1, 0xc161, 0x1e92, + 0x7495, 0x97e4, 0x94b6, 0xa39e, 0x4f1b, 0x18f8, 0x7bd4, 0x0c4c}, + {0xeb3d, 0x723d, 0x0907, 0x525b, 0x463a, 0x49a8, 0xc6b8, 0xce7f, + 0x740c, 0x0d7d, 0xa83b, 0x457f, 0xae8e, 0xc6af, 0xd331, 0x0475}}, + {{0x6abd, 0xc7af, 0x3e4e, 0x95fd, 0x8fc4, 0xee25, 0x1f9c, 0x0afe, + 0x291d, 0xcde0, 0x48f4, 0xb2e8, 0xf7af, 0x8f8d, 0x0bd6, 0x078d}, + {0x4037, 0xbf0e, 0x2081, 0xf363, 0x13b2, 0x381e, 0xfb6e, 0x818e, + 0x27e4, 0x5662, 0x18b0, 0x0cd2, 0x81f5, 0x9415, 0x0d6c, 0xf9fb}, + {0xd205, 0x0981, 0x0498, 0x1f08, 0xdb93, 0x1732, 0x0579, 0x1424, + 0xad95, 0x642f, 0x050c, 0x1d6d, 0xfc95, 0xfc4a, 0xd41b, 0x3521}}, + {{0xf23a, 0x4633, 0xaef4, 0x1a92, 0x3c8b, 0x1f09, 0x30f3, 0x4c56, + 0x2a2f, 0x4f62, 0xf5e4, 0x8329, 0x63cc, 0xb593, 0xec6a, 0xc428}, + {0x93a7, 0xfcf6, 0x606d, 0xd4b2, 0x2aad, 0x28b4, 0xc65b, 0x8998, + 0x4e08, 0xd178, 0x0900, 0xc82b, 0x7470, 0xa342, 0x7c0f, 0xffff}, + {0x315f, 0xf304, 0xeb7b, 0xe5c3, 0x1451, 0x6311, 0x8f37, 0x93a8, + 0x4a38, 0xa6c6, 0xe393, 0x1087, 0x6301, 0xd673, 0x4ec4, 0xffff}}, + {{0x892e, 0xeed0, 0x1165, 0xcbc1, 0x5545, 0xa280, 0x7243, 0x10c9, + 0x9536, 0x36af, 0xb3fc, 0x2d7c, 0xe8a5, 0x09d6, 0xe1d4, 0xe85d}, + {0xae09, 0xc28a, 0xd777, 0xbd80, 0x23d6, 0xf980, 0xeb7c, 0x4e0e, + 0xf7dc, 0x6475, 0xf10a, 0x2d33, 0x5dfd, 0x797a, 0x7f1c, 0xf71a}, + {0x4064, 
0x8717, 0xd091, 0x80b0, 0x4527, 0x8442, 0xac8b, 0x9614, + 0xc633, 0x35f5, 0x7714, 0x2e83, 0x4aaa, 0xd2e4, 0x1acd, 0x0562}}, + {{0xdb64, 0x0937, 0x308b, 0x53b0, 0x00e8, 0xc77f, 0x2f30, 0x37f7, + 0x79ce, 0xeb7f, 0xde81, 0x9286, 0xafda, 0x0e62, 0xae00, 0x0067}, + {0x2cc7, 0xd362, 0xb161, 0x0557, 0x4ff2, 0xb9c8, 0x06fe, 0x5f2b, + 0xde33, 0x0190, 0x28c6, 0xb886, 0xee2b, 0x5a4e, 0x3289, 0x0185}, + {0x4215, 0x923e, 0xf34f, 0xb362, 0x88f8, 0xceec, 0xafdd, 0x7f42, + 0x0c57, 0x56b2, 0xa366, 0x6a08, 0x0826, 0xfb8f, 0x1b03, 0x0163}}, + {{0xa4ba, 0x8408, 0x810a, 0xdeba, 0x47a3, 0x853a, 0xeb64, 0x2f74, + 0x3039, 0x038c, 0x7fbb, 0x498e, 0xd1e9, 0x46fb, 0x5691, 0x32a4}, + {0xd749, 0xb49d, 0x20b7, 0x2af6, 0xd34a, 0xd2da, 0x0a10, 0xf781, + 0x58c9, 0x171f, 0x3cb6, 0x6337, 0x88cd, 0xcf1e, 0xb246, 0x7351}, + {0xf729, 0xcf0a, 0x96ea, 0x032c, 0x4a8f, 0x42fe, 0xbac8, 0xec65, + 0x1510, 0x0d75, 0x4c17, 0x8d29, 0xa03f, 0x8b7e, 0x2c49, 0x0000}}, + {{0x0fa4, 0x8e1c, 0x3788, 0xba3c, 0x8d52, 0xd89d, 0x12c8, 0xeced, + 0x9fe6, 0x9b88, 0xecf3, 0xe3c8, 0xac48, 0x76ed, 0xf23e, 0xda79}, + {0x1103, 0x227c, 0x5b00, 0x3fcf, 0xc5d0, 0x2d28, 0x8020, 0x4d1c, + 0xc6b9, 0x67f9, 0x6f39, 0x989a, 0xda53, 0x3847, 0xd416, 0xe0d0}, + {0xdd8e, 0xcf31, 0x3710, 0x7e44, 0xa511, 0x933c, 0x0cc3, 0x5145, + 0xf632, 0x5e1d, 0x038f, 0x5ce7, 0x7265, 0xda9d, 0xded6, 0x08f8}}, + {{0xe2c8, 0x91d5, 0xa5f5, 0x735f, 0x6b58, 0x56dc, 0xb39d, 0x5c4a, + 0x57d0, 0xa1c2, 0xd92f, 0x9ad4, 0xf7c4, 0x51dd, 0xaf5c, 0x0096}, + {0x1739, 0x7207, 0x7505, 0xbf35, 0x42de, 0x0a29, 0xa962, 0xdedf, + 0x53e8, 0x12bf, 0xcde7, 0xd8e2, 0x8d4d, 0x2c4b, 0xb1b1, 0x0628}, + {0x992d, 0xe3a7, 0xb422, 0xc198, 0x23ab, 0xa6ef, 0xb45d, 0x50da, + 0xa738, 0x014a, 0x2310, 0x85fb, 0x5fe8, 0x1b18, 0x1774, 0x03a7}}, + {{0x1f16, 0x2b09, 0x0236, 0xee90, 0xccf9, 0x9775, 0x8130, 0x4c91, + 0x9091, 0x310b, 0x6dc4, 0x86f6, 0xc2e8, 0xef60, 0xfc0e, 0xf3a4}, + {0x9f49, 0xac15, 0x02af, 0x110f, 0xc59d, 0x5677, 0xa1a9, 0x38d5, + 0x914f, 0xa909, 0x3a3a, 0x4a39, 0x3703, 0xea30, 0x73da, 
0xffad}, + {0x15ed, 0xdd16, 0x83c7, 0x270a, 0x862f, 0xd8ad, 0xcaa1, 0x5f41, + 0x99a9, 0x3fc8, 0x7bb2, 0x360a, 0xb06d, 0xfadc, 0x1b36, 0xffa8}}, + {{0xc4e0, 0xb8fd, 0x5106, 0xe169, 0x754c, 0xa58c, 0xc413, 0x8224, + 0x5483, 0x63ec, 0xd477, 0x8473, 0x4778, 0x9281, 0x0000, 0x0000}, + {0x85e1, 0xff54, 0xb200, 0xe413, 0xf4f4, 0x4c0f, 0xfcec, 0xc183, + 0x60d3, 0x1b0c, 0x3834, 0x601c, 0x943c, 0xbe6e, 0x0002, 0x0000}, + {0xf4f8, 0xfd5e, 0x61ef, 0xece8, 0x9199, 0xe5c4, 0x05a6, 0xe6c3, + 0xc4ae, 0x8b28, 0x66b1, 0x8a95, 0x9ece, 0x8f4a, 0x0001, 0x0000}}, + {{0xeae9, 0xa1b4, 0xc6d8, 0x2411, 0x2b5a, 0x1dd0, 0x2dc9, 0xb57b, + 0x5ccd, 0x4957, 0xaf59, 0xa04b, 0x5f42, 0xab7c, 0x2826, 0x526f}, + {0xf407, 0x165a, 0xb724, 0x2f12, 0x2ea1, 0x470b, 0x4464, 0xbd35, + 0x606f, 0xd73e, 0x50d3, 0x8a7f, 0x8029, 0x7ffc, 0xbe31, 0x6cfb}, + {0x8171, 0x1f4c, 0xced2, 0x9c99, 0x6d7e, 0x5a0f, 0xfefb, 0x59e3, + 0xa0c8, 0xabd9, 0xc4c5, 0x57d3, 0xbfa3, 0x4f11, 0x96a2, 0x5a7d}}, + {{0xe068, 0x4cc0, 0x8bcd, 0xc903, 0x9e52, 0xb3e1, 0xd745, 0x0995, + 0xdd8f, 0xf14b, 0xd2ac, 0xd65a, 0xda1d, 0xa742, 0xbac5, 0x474c}, + {0x7481, 0xf2ad, 0x9757, 0x2d82, 0xb683, 0xb16b, 0x0002, 0x7b60, + 0x8f0c, 0x2594, 0x8f64, 0x3b7a, 0x3552, 0x8d9d, 0xb9d7, 0x67eb}, + {0xcaab, 0xb9a1, 0xf966, 0xe311, 0x5b34, 0x0fa0, 0x6abc, 0x8134, + 0xab3d, 0x90f6, 0x1984, 0x9232, 0xec17, 0x74e5, 0x2ceb, 0x434e}}, + {{0x0fb1, 0x7a55, 0x1a5c, 0x53eb, 0xd7b3, 0x7a01, 0xca32, 0x31f6, + 0x3b74, 0x679e, 0x1501, 0x6c57, 0xdb20, 0x8b7c, 0xd7d0, 0x8097}, + {0xb127, 0xb20c, 0xe3a2, 0x96f3, 0xe0d8, 0xd50c, 0x14b4, 0x0b40, + 0x6eeb, 0xa258, 0x99db, 0x3c8c, 0x0f51, 0x4198, 0x3887, 0xffd0}, + {0x0273, 0x9f8c, 0x9669, 0xbbba, 0x1c49, 0x767c, 0xc2af, 0x59f0, + 0x1366, 0xd397, 0x63ac, 0x6fe8, 0x1a9a, 0x1259, 0x01d0, 0x0016}}, + {{0x7876, 0x2a35, 0xa24a, 0x433e, 0x5501, 0x573c, 0xd76d, 0xcb82, + 0x1334, 0xb4a6, 0xf290, 0xc797, 0xeae9, 0x2b83, 0x1e2b, 0x8b14}, + {0x3885, 0x8aef, 0x9dea, 0x2b8c, 0xdd7c, 0xd7cd, 0xb0cc, 0x05ee, + 0x361b, 0x3800, 0xb0d4, 0x4c23, 
0xbd3f, 0x5180, 0x9783, 0xff80}, + {0xab36, 0x3104, 0xdae8, 0x0704, 0x4a28, 0x6714, 0x824b, 0x0051, + 0x8134, 0x1f6a, 0x712d, 0x1f03, 0x03b2, 0xecac, 0x377d, 0xfef9}} + }; + + int i, j, ok; + + /* Test known inputs/outputs */ + for (i = 0; (size_t)i < sizeof(CASES) / sizeof(CASES[0]); ++i) { + uint16_t out[16]; + test_modinv32_uint16(out, CASES[i][0], CASES[i][1]); + for (j = 0; j < 16; ++j) CHECK(out[j] == CASES[i][2][j]); +#ifdef SECP256K1_WIDEMUL_INT128 + test_modinv64_uint16(out, CASES[i][0], CASES[i][1]); + for (j = 0; j < 16; ++j) CHECK(out[j] == CASES[i][2][j]); +#endif + } + + for (i = 0; i < 100 * count; ++i) { + /* 256-bit numbers in 16-uint16_t's notation */ + static const uint16_t ZERO[16] = {0}; + uint16_t xd[16]; /* the number (in range [0,2^256)) to be inverted */ + uint16_t md[16]; /* the modulus (odd, in range [3,2^256)) */ + uint16_t id[16]; /* the inverse of xd mod md */ + + /* generate random xd and md, so that md is odd, md>1, xd 0)); - rustsecp256k1zkp_v0_4_0_scalar_negate(&neg, &s); - rustsecp256k1zkp_v0_4_0_num_sub(&negnum, &order, &snum); - rustsecp256k1zkp_v0_4_0_num_mod(&negnum, &order); - /* Check that comparison with the half order is equal to testing for high scalar after negation. */ - CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_high(&neg) == (rustsecp256k1zkp_v0_4_0_num_cmp(&negnum, &half_order) > 0)); - /* Negating should change the high property, unless the value was already zero. */ - CHECK((rustsecp256k1zkp_v0_4_0_scalar_is_high(&s) == rustsecp256k1zkp_v0_4_0_scalar_is_high(&neg)) == rustsecp256k1zkp_v0_4_0_scalar_is_zero(&s)); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&negnum2, &neg); - /* Negating a scalar should be equal to (order - n) mod order on the number. */ - CHECK(rustsecp256k1zkp_v0_4_0_num_eq(&negnum, &negnum2)); - rustsecp256k1zkp_v0_4_0_scalar_add(&neg, &neg, &s); - /* Adding a number to its negation should result in zero. 
*/ - CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(&neg)); - rustsecp256k1zkp_v0_4_0_scalar_negate(&neg, &neg); - /* Negating zero should still result in zero. */ - CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(&neg)); - } - - { - /* Test rustsecp256k1zkp_v0_4_0_scalar_mul_shift_var. */ - rustsecp256k1zkp_v0_4_0_scalar r; - rustsecp256k1zkp_v0_4_0_num one; - rustsecp256k1zkp_v0_4_0_num rnum; - rustsecp256k1zkp_v0_4_0_num rnum2; - unsigned char cone[1] = {0x01}; - unsigned int shift = 256 + rustsecp256k1zkp_v0_4_0_testrand_int(257); - rustsecp256k1zkp_v0_4_0_scalar_mul_shift_var(&r, &s1, &s2, shift); - rustsecp256k1zkp_v0_4_0_num_mul(&rnum, &s1num, &s2num); - rustsecp256k1zkp_v0_4_0_num_shift(&rnum, shift - 1); - rustsecp256k1zkp_v0_4_0_num_set_bin(&one, cone, 1); - rustsecp256k1zkp_v0_4_0_num_add(&rnum, &rnum, &one); - rustsecp256k1zkp_v0_4_0_num_shift(&rnum, 1); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&rnum2, &r); - CHECK(rustsecp256k1zkp_v0_4_0_num_eq(&rnum, &rnum2)); - } - { /* test rustsecp256k1zkp_v0_4_0_scalar_shr_int */ rustsecp256k1zkp_v0_4_0_scalar r; @@ -1001,34 +1696,6 @@ void scalar_test(void) { CHECK(expected == low); } } -#endif - - { - /* Test that scalar inverses are equal to the inverse of their number modulo the order. */ - if (!rustsecp256k1zkp_v0_4_0_scalar_is_zero(&s)) { - rustsecp256k1zkp_v0_4_0_scalar inv; -#ifndef USE_NUM_NONE - rustsecp256k1zkp_v0_4_0_num invnum; - rustsecp256k1zkp_v0_4_0_num invnum2; -#endif - rustsecp256k1zkp_v0_4_0_scalar_inverse(&inv, &s); -#ifndef USE_NUM_NONE - rustsecp256k1zkp_v0_4_0_num_mod_inverse(&invnum, &snum, &order); - rustsecp256k1zkp_v0_4_0_scalar_get_num(&invnum2, &inv); - CHECK(rustsecp256k1zkp_v0_4_0_num_eq(&invnum, &invnum2)); -#endif - rustsecp256k1zkp_v0_4_0_scalar_mul(&inv, &inv, &s); - /* Multiplying a scalar with its inverse must result in one. */ - CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_one(&inv)); - rustsecp256k1zkp_v0_4_0_scalar_inverse(&inv, &inv); - /* Inverting one must result in one. 
*/ - CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_one(&inv)); -#ifndef USE_NUM_NONE - rustsecp256k1zkp_v0_4_0_scalar_get_num(&invnum, &inv); - CHECK(rustsecp256k1zkp_v0_4_0_num_is_one(&invnum)); -#endif - } - } { /* Test commutativity of add. */ @@ -1100,14 +1767,6 @@ void scalar_test(void) { CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&r1, &r2)); } - { - /* Test square. */ - rustsecp256k1zkp_v0_4_0_scalar r1, r2; - rustsecp256k1zkp_v0_4_0_scalar_sqr(&r1, &s1); - rustsecp256k1zkp_v0_4_0_scalar_mul(&r2, &s1, &s1); - CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&r1, &r2)); - } - { /* Test multiplicative identity. */ rustsecp256k1zkp_v0_4_0_scalar r1, v1; @@ -1282,48 +1941,6 @@ void run_scalar_tests(void) { CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(&o)); } -#ifndef USE_NUM_NONE - { - /* Test rustsecp256k1zkp_v0_4_0_scalar_set_b32 boundary conditions */ - rustsecp256k1zkp_v0_4_0_num order; - rustsecp256k1zkp_v0_4_0_scalar scalar; - unsigned char bin[32]; - unsigned char bin_tmp[32]; - int overflow = 0; - /* 2^256-1 - order */ - static const rustsecp256k1zkp_v0_4_0_scalar all_ones_minus_order = SECP256K1_SCALAR_CONST( - 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000001UL, - 0x45512319UL, 0x50B75FC4UL, 0x402DA173UL, 0x2FC9BEBEUL - ); - - /* A scalar set to 0s should be 0. */ - memset(bin, 0, 32); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&scalar, bin, &overflow); - CHECK(overflow == 0); - CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(&scalar)); - - /* A scalar with value of the curve order should be 0. */ - rustsecp256k1zkp_v0_4_0_scalar_order_get_num(&order); - rustsecp256k1zkp_v0_4_0_num_get_bin(bin, 32, &order); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&scalar, bin, &overflow); - CHECK(overflow == 1); - CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(&scalar)); - - /* A scalar with value of the curve order minus one should not overflow. 
*/ - bin[31] -= 1; - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&scalar, bin, &overflow); - CHECK(overflow == 0); - rustsecp256k1zkp_v0_4_0_scalar_get_b32(bin_tmp, &scalar); - CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(bin, bin_tmp, 32) == 0); - - /* A scalar set to all 1s should overflow. */ - memset(bin, 0xFF, 32); - rustsecp256k1zkp_v0_4_0_scalar_set_b32(&scalar, bin, &overflow); - CHECK(overflow == 1); - CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&scalar, &all_ones_minus_order)); - } -#endif - { /* Does check_overflow check catch all ones? */ static const rustsecp256k1zkp_v0_4_0_scalar overflowed = SECP256K1_SCALAR_CONST( @@ -1346,9 +1963,7 @@ void run_scalar_tests(void) { rustsecp256k1zkp_v0_4_0_scalar one; rustsecp256k1zkp_v0_4_0_scalar r1; rustsecp256k1zkp_v0_4_0_scalar r2; -#if defined(USE_SCALAR_INV_NUM) rustsecp256k1zkp_v0_4_0_scalar zzv; -#endif int overflow; unsigned char chal[33][2][32] = { {{0xff, 0xff, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00, @@ -1898,10 +2513,8 @@ void run_scalar_tests(void) { if (!rustsecp256k1zkp_v0_4_0_scalar_is_zero(&y)) { rustsecp256k1zkp_v0_4_0_scalar_inverse(&zz, &y); CHECK(!rustsecp256k1zkp_v0_4_0_scalar_check_overflow(&zz)); -#if defined(USE_SCALAR_INV_NUM) rustsecp256k1zkp_v0_4_0_scalar_inverse_var(&zzv, &y); CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&zzv, &zz)); -#endif rustsecp256k1zkp_v0_4_0_scalar_mul(&z, &z, &zz); CHECK(!rustsecp256k1zkp_v0_4_0_scalar_check_overflow(&z)); CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&x, &z)); @@ -1909,12 +2522,6 @@ void run_scalar_tests(void) { CHECK(!rustsecp256k1zkp_v0_4_0_scalar_check_overflow(&zz)); CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&one, &zz)); } - rustsecp256k1zkp_v0_4_0_scalar_mul(&z, &x, &x); - CHECK(!rustsecp256k1zkp_v0_4_0_scalar_check_overflow(&z)); - rustsecp256k1zkp_v0_4_0_scalar_sqr(&zz, &x); - CHECK(!rustsecp256k1zkp_v0_4_0_scalar_check_overflow(&zz)); - CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&zz, &z)); - CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&r2, &zz)); } } } @@ -1970,13 +2577,6 
@@ int check_fe_equal(const rustsecp256k1zkp_v0_4_0_fe *a, const rustsecp256k1zkp_v return rustsecp256k1zkp_v0_4_0_fe_equal_var(&an, &bn); } -int check_fe_inverse(const rustsecp256k1zkp_v0_4_0_fe *a, const rustsecp256k1zkp_v0_4_0_fe *ai) { - rustsecp256k1zkp_v0_4_0_fe x; - rustsecp256k1zkp_v0_4_0_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1); - rustsecp256k1zkp_v0_4_0_fe_mul(&x, a, ai); - return check_fe_equal(&x, &one); -} - void run_field_convert(void) { static const unsigned char b32[32] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, @@ -2096,27 +2696,67 @@ void run_field_misc(void) { } } -void run_field_inv(void) { - rustsecp256k1zkp_v0_4_0_fe x, xi, xii; +void test_fe_mul(const rustsecp256k1zkp_v0_4_0_fe* a, const rustsecp256k1zkp_v0_4_0_fe* b, int use_sqr) +{ + rustsecp256k1zkp_v0_4_0_fe c, an, bn; + /* Variables in BE 32-byte format. */ + unsigned char a32[32], b32[32], c32[32]; + /* Variables in LE 16x uint16_t format. */ + uint16_t a16[16], b16[16], c16[16]; + /* Field modulus in LE 16x uint16_t format. */ + static const uint16_t m16[16] = { + 0xfc2f, 0xffff, 0xfffe, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, 0xffff, + }; + uint16_t t16[32]; int i; - for (i = 0; i < 10*count; i++) { - random_fe_non_zero(&x); - rustsecp256k1zkp_v0_4_0_fe_inv(&xi, &x); - CHECK(check_fe_inverse(&x, &xi)); - rustsecp256k1zkp_v0_4_0_fe_inv(&xii, &xi); - CHECK(check_fe_equal(&x, &xii)); + + /* Compute C = A * B in fe format. */ + c = *a; + if (use_sqr) { + rustsecp256k1zkp_v0_4_0_fe_sqr(&c, &c); + } else { + rustsecp256k1zkp_v0_4_0_fe_mul(&c, &c, b); } + + /* Convert A, B, C into LE 16x uint16_t format. 
*/ + an = *a; + bn = *b; + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&c); + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&an); + rustsecp256k1zkp_v0_4_0_fe_normalize_var(&bn); + rustsecp256k1zkp_v0_4_0_fe_get_b32(a32, &an); + rustsecp256k1zkp_v0_4_0_fe_get_b32(b32, &bn); + rustsecp256k1zkp_v0_4_0_fe_get_b32(c32, &c); + for (i = 0; i < 16; ++i) { + a16[i] = a32[31 - 2*i] + ((uint16_t)a32[30 - 2*i] << 8); + b16[i] = b32[31 - 2*i] + ((uint16_t)b32[30 - 2*i] << 8); + c16[i] = c32[31 - 2*i] + ((uint16_t)c32[30 - 2*i] << 8); + } + /* Compute T = A * B in LE 16x uint16_t format. */ + mulmod256(t16, a16, b16, m16); + /* Compare */ + CHECK(rustsecp256k1zkp_v0_4_0_memcmp_var(t16, c16, 32) == 0); } -void run_field_inv_var(void) { - rustsecp256k1zkp_v0_4_0_fe x, xi, xii; +void run_fe_mul(void) { int i; - for (i = 0; i < 10*count; i++) { - random_fe_non_zero(&x); - rustsecp256k1zkp_v0_4_0_fe_inv_var(&xi, &x); - CHECK(check_fe_inverse(&x, &xi)); - rustsecp256k1zkp_v0_4_0_fe_inv_var(&xii, &xi); - CHECK(check_fe_equal(&x, &xii)); + for (i = 0; i < 100 * count; ++i) { + rustsecp256k1zkp_v0_4_0_fe a, b, c, d; + random_fe(&a); + random_field_element_magnitude(&a); + random_fe(&b); + random_field_element_magnitude(&b); + random_fe_test(&c); + random_field_element_magnitude(&c); + random_fe_test(&d); + random_field_element_magnitude(&d); + test_fe_mul(&a, &a, 1); + test_fe_mul(&c, &c, 1); + test_fe_mul(&a, &b, 0); + test_fe_mul(&a, &c, 0); + test_fe_mul(&c, &b, 0); + test_fe_mul(&c, &d, 0); } } @@ -2184,6 +2824,318 @@ void run_sqrt(void) { } } +/***** FIELD/SCALAR INVERSE TESTS *****/ + +static const rustsecp256k1zkp_v0_4_0_scalar scalar_minus_one = SECP256K1_SCALAR_CONST( + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, + 0xBAAEDCE6, 0xAF48A03B, 0xBFD25E8C, 0xD0364140 +); + +static const rustsecp256k1zkp_v0_4_0_fe fe_minus_one = SECP256K1_FE_CONST( + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFC2E +); + +/* These tests test the following 
identities: + * + * for x==0: 1/x == 0 + * for x!=0: x*(1/x) == 1 + * for x!=0 and x!=1: 1/(1/x - 1) + 1 == -1/(x-1) + */ + +void test_inverse_scalar(rustsecp256k1zkp_v0_4_0_scalar* out, const rustsecp256k1zkp_v0_4_0_scalar* x, int var) +{ + rustsecp256k1zkp_v0_4_0_scalar l, r, t; + + (var ? rustsecp256k1zkp_v0_4_0_scalar_inverse_var : rustsecp256k1zkp_v0_4_0_scalar_inverse)(&l, x); /* l = 1/x */ + if (out) *out = l; + if (rustsecp256k1zkp_v0_4_0_scalar_is_zero(x)) { + CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(&l)); + return; + } + rustsecp256k1zkp_v0_4_0_scalar_mul(&t, x, &l); /* t = x*(1/x) */ + CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_one(&t)); /* x*(1/x) == 1 */ + rustsecp256k1zkp_v0_4_0_scalar_add(&r, x, &scalar_minus_one); /* r = x-1 */ + if (rustsecp256k1zkp_v0_4_0_scalar_is_zero(&r)) return; + (var ? rustsecp256k1zkp_v0_4_0_scalar_inverse_var : rustsecp256k1zkp_v0_4_0_scalar_inverse)(&r, &r); /* r = 1/(x-1) */ + rustsecp256k1zkp_v0_4_0_scalar_add(&l, &scalar_minus_one, &l); /* l = 1/x-1 */ + (var ? rustsecp256k1zkp_v0_4_0_scalar_inverse_var : rustsecp256k1zkp_v0_4_0_scalar_inverse)(&l, &l); /* l = 1/(1/x-1) */ + rustsecp256k1zkp_v0_4_0_scalar_add(&l, &l, &rustsecp256k1zkp_v0_4_0_scalar_one); /* l = 1/(1/x-1)+1 */ + rustsecp256k1zkp_v0_4_0_scalar_add(&l, &r, &l); /* l = 1/(1/x-1)+1 + 1/(x-1) */ + CHECK(rustsecp256k1zkp_v0_4_0_scalar_is_zero(&l)); /* l == 0 */ +} + +void test_inverse_field(rustsecp256k1zkp_v0_4_0_fe* out, const rustsecp256k1zkp_v0_4_0_fe* x, int var) +{ + rustsecp256k1zkp_v0_4_0_fe l, r, t; + + (var ? 
rustsecp256k1zkp_v0_4_0_fe_inv_var : rustsecp256k1zkp_v0_4_0_fe_inv)(&l, x) ; /* l = 1/x */ + if (out) *out = l; + t = *x; /* t = x */ + if (rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(&t)) { + CHECK(rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(&l)); + return; + } + rustsecp256k1zkp_v0_4_0_fe_mul(&t, x, &l); /* t = x*(1/x) */ + rustsecp256k1zkp_v0_4_0_fe_add(&t, &fe_minus_one); /* t = x*(1/x)-1 */ + CHECK(rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero(&t)); /* x*(1/x)-1 == 0 */ + r = *x; /* r = x */ + rustsecp256k1zkp_v0_4_0_fe_add(&r, &fe_minus_one); /* r = x-1 */ + if (rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(&r)) return; + (var ? rustsecp256k1zkp_v0_4_0_fe_inv_var : rustsecp256k1zkp_v0_4_0_fe_inv)(&r, &r); /* r = 1/(x-1) */ + rustsecp256k1zkp_v0_4_0_fe_add(&l, &fe_minus_one); /* l = 1/x-1 */ + (var ? rustsecp256k1zkp_v0_4_0_fe_inv_var : rustsecp256k1zkp_v0_4_0_fe_inv)(&l, &l); /* l = 1/(1/x-1) */ + rustsecp256k1zkp_v0_4_0_fe_add(&l, &rustsecp256k1zkp_v0_4_0_fe_one); /* l = 1/(1/x-1)+1 */ + rustsecp256k1zkp_v0_4_0_fe_add(&l, &r); /* l = 1/(1/x-1)+1 + 1/(x-1) */ + CHECK(rustsecp256k1zkp_v0_4_0_fe_normalizes_to_zero_var(&l)); /* l == 0 */ +} + +void run_inverse_tests(void) +{ + /* Fixed test cases for field inverses: pairs of (x, 1/x) mod p. 
*/ + static const rustsecp256k1zkp_v0_4_0_fe fe_cases[][2] = { + /* 0 */ + {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0), + SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 0)}, + /* 1 */ + {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1), + SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1)}, + /* -1 */ + {SECP256K1_FE_CONST(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0xfffffc2e), + SECP256K1_FE_CONST(0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0xfffffc2e)}, + /* 2 */ + {SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2), + SECP256K1_FE_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x7ffffe18)}, + /* 2**128 */ + {SECP256K1_FE_CONST(0, 0, 0, 1, 0, 0, 0, 0), + SECP256K1_FE_CONST(0xbcb223fe, 0xdc24a059, 0xd838091d, 0xd2253530, 0xffffffff, 0xffffffff, 0xffffffff, 0x434dd931)}, + /* Input known to need 637 divsteps */ + {SECP256K1_FE_CONST(0xe34e9c95, 0x6bee8a84, 0x0dcb632a, 0xdb8a1320, 0x66885408, 0x06f3f996, 0x7c11ca84, 0x19199ec3), + SECP256K1_FE_CONST(0xbd2cbd8f, 0x1c536828, 0x9bccda44, 0x2582ac0c, 0x870152b0, 0x8a3f09fb, 0x1aaadf92, 0x19b618e5)}, + /* Input known to need 567 divsteps starting with delta=1/2. */ + {SECP256K1_FE_CONST(0xf6bc3ba3, 0x636451c4, 0x3e46357d, 0x2c21d619, 0x0988e234, 0x15985661, 0x6672982b, 0xa7549bfc), + SECP256K1_FE_CONST(0xb024fdc7, 0x5547451e, 0x426c585f, 0xbd481425, 0x73df6b75, 0xeef6d9d0, 0x389d87d4, 0xfbb440ba)}, + /* Input known to need 566 divsteps starting with delta=1/2. 
*/ + {SECP256K1_FE_CONST(0xb595d81b, 0x2e3c1e2f, 0x482dbc65, 0xe4865af7, 0x9a0a50aa, 0x29f9e618, 0x6f87d7a5, 0x8d1063ae), + SECP256K1_FE_CONST(0xc983337c, 0x5d5c74e1, 0x49918330, 0x0b53afb5, 0xa0428a0b, 0xce6eef86, 0x059bd8ef, 0xe5b908de)}, + /* Set of 10 inputs accessing all 128 entries in the modinv32 divsteps_var table */ + {SECP256K1_FE_CONST(0x00000000, 0x00000000, 0xe0ff1f80, 0x1f000000, 0x00000000, 0x00000000, 0xfeff0100, 0x00000000), + SECP256K1_FE_CONST(0x9faf9316, 0x77e5049d, 0x0b5e7a1b, 0xef70b893, 0x18c9e30c, 0x045e7fd7, 0x29eddf8c, 0xd62e9e3d)}, + {SECP256K1_FE_CONST(0x621a538d, 0x511b2780, 0x35688252, 0x53f889a4, 0x6317c3ac, 0x32ba0a46, 0x6277c0d1, 0xccd31192), + SECP256K1_FE_CONST(0x38513b0c, 0x5eba856f, 0xe29e882e, 0x9b394d8c, 0x34bda011, 0xeaa66943, 0x6a841a4c, 0x6ae8bcff)}, + {SECP256K1_FE_CONST(0x00000200, 0xf0ffff1f, 0x00000000, 0x0000e0ff, 0xffffffff, 0xfffcffff, 0xffffffff, 0xffff0100), + SECP256K1_FE_CONST(0x5da42a52, 0x3640de9e, 0x13e64343, 0x0c7591b7, 0x6c1e3519, 0xf048c5b6, 0x0484217c, 0xedbf8b2f)}, + {SECP256K1_FE_CONST(0xd1343ef9, 0x4b952621, 0x7c52a2ee, 0x4ea1281b, 0x4ab46410, 0x9f26998d, 0xa686a8ff, 0x9f2103e8), + SECP256K1_FE_CONST(0x84044385, 0x9a4619bf, 0x74e35b6d, 0xa47e0c46, 0x6b7fb47d, 0x9ffab128, 0xb0775aa3, 0xcb318bd1)}, + {SECP256K1_FE_CONST(0xb27235d2, 0xc56a52be, 0x210db37a, 0xd50d23a4, 0xbe621bdd, 0x5df22c6a, 0xe926ba62, 0xd2e4e440), + SECP256K1_FE_CONST(0x67a26e54, 0x483a9d3c, 0xa568469e, 0xd258ab3d, 0xb9ec9981, 0xdca9b1bd, 0x8d2775fe, 0x53ae429b)}, + {SECP256K1_FE_CONST(0x00000000, 0x00000000, 0x00e0ffff, 0xffffff83, 0xffffffff, 0x3f00f00f, 0x000000e0, 0xffffffff), + SECP256K1_FE_CONST(0x310e10f8, 0x23bbfab0, 0xac94907d, 0x076c9a45, 0x8d357d7f, 0xc763bcee, 0x00d0e615, 0x5a6acef6)}, + {SECP256K1_FE_CONST(0xfeff0300, 0x001c0000, 0xf80700c0, 0x0ff0ffff, 0xffffffff, 0x0fffffff, 0xffff0100, 0x7f0000fe), + SECP256K1_FE_CONST(0x28e2fdb4, 0x0709168b, 0x86f598b0, 0x3453a370, 0x530cf21f, 0x32f978d5, 0x1d527a71, 0x59269b0c)}, + 
{SECP256K1_FE_CONST(0xc2591afa, 0x7bb98ef7, 0x090bb273, 0x85c14f87, 0xbb0b28e0, 0x54d3c453, 0x85c66753, 0xd5574d2f), + SECP256K1_FE_CONST(0xfdca70a2, 0x70ce627c, 0x95e66fae, 0x848a6dbb, 0x07ffb15c, 0x5f63a058, 0xba4140ed, 0x6113b503)}, + {SECP256K1_FE_CONST(0xf5475db3, 0xedc7b5a3, 0x411c047e, 0xeaeb452f, 0xc625828e, 0x1cf5ad27, 0x8eec1060, 0xc7d3e690), + SECP256K1_FE_CONST(0x5eb756c0, 0xf963f4b9, 0xdc6a215e, 0xec8cc2d8, 0x2e9dec01, 0xde5eb88d, 0x6aba7164, 0xaecb2c5a)}, + {SECP256K1_FE_CONST(0x00000000, 0x00f8ffff, 0xffffffff, 0x01000000, 0xe0ff1f00, 0x00000000, 0xffffff7f, 0x00000000), + SECP256K1_FE_CONST(0xe0d2e3d8, 0x49b6157d, 0xe54e88c2, 0x1a7f02ca, 0x7dd28167, 0xf1125d81, 0x7bfa444e, 0xbe110037)}, + /* Selection of randomly generated inputs that reach high/low d/e values in various configurations. */ + {SECP256K1_FE_CONST(0x13cc08a4, 0xd8c41f0f, 0x179c3e67, 0x54c46c67, 0xc4109221, 0x09ab3b13, 0xe24d9be1, 0xffffe950), + SECP256K1_FE_CONST(0xb80c8006, 0xd16abaa7, 0xcabd71e5, 0xcf6714f4, 0x966dd3d0, 0x64767a2d, 0xe92c4441, 0x51008cd1)}, + {SECP256K1_FE_CONST(0xaa6db990, 0x95efbca1, 0x3cc6ff71, 0x0602e24a, 0xf49ff938, 0x99fffc16, 0x46f40993, 0xc6e72057), + SECP256K1_FE_CONST(0xd5d3dd69, 0xb0c195e5, 0x285f1d49, 0xe639e48c, 0x9223f8a9, 0xca1d731d, 0x9ca482f9, 0xa5b93e06)}, + {SECP256K1_FE_CONST(0x1c680eac, 0xaeabffd8, 0x9bdc4aee, 0x1781e3de, 0xa3b08108, 0x0015f2e0, 0x94449e1b, 0x2f67a058), + SECP256K1_FE_CONST(0x7f083f8d, 0x31254f29, 0x6510f475, 0x245c373d, 0xc5622590, 0x4b323393, 0x32ed1719, 0xc127444b)}, + {SECP256K1_FE_CONST(0x147d44b3, 0x012d83f8, 0xc160d386, 0x1a44a870, 0x9ba6be96, 0x8b962707, 0x267cbc1a, 0xb65b2f0a), + SECP256K1_FE_CONST(0x555554ff, 0x170aef1e, 0x50a43002, 0xe51fbd36, 0xafadb458, 0x7a8aded1, 0x0ca6cd33, 0x6ed9087c)}, + {SECP256K1_FE_CONST(0x12423796, 0x22f0fe61, 0xf9ca017c, 0x5384d107, 0xa1fbf3b2, 0x3b018013, 0x916a3c37, 0x4000b98c), + SECP256K1_FE_CONST(0x20257700, 0x08668f94, 0x1177e306, 0x136c01f5, 0x8ed1fbd2, 0x95ec4589, 0xae38edb9, 
0xfd19b6d7)}, + {SECP256K1_FE_CONST(0xdcf2d030, 0x9ab42cb4, 0x93ffa181, 0xdcd23619, 0x39699b52, 0x08909a20, 0xb5a17695, 0x3a9dcf21), + SECP256K1_FE_CONST(0x1f701dea, 0xe211fb1f, 0x4f37180d, 0x63a0f51c, 0x29fe1e40, 0xa40b6142, 0x2e7b12eb, 0x982b06b6)}, + {SECP256K1_FE_CONST(0x79a851f6, 0xa6314ed3, 0xb35a55e6, 0xca1c7d7f, 0xe32369ea, 0xf902432e, 0x375308c5, 0xdfd5b600), + SECP256K1_FE_CONST(0xcaae00c5, 0xe6b43851, 0x9dabb737, 0x38cba42c, 0xa02c8549, 0x7895dcbf, 0xbd183d71, 0xafe4476a)}, + {SECP256K1_FE_CONST(0xede78fdd, 0xcfc92bf1, 0x4fec6c6c, 0xdb8d37e2, 0xfb66bc7b, 0x28701870, 0x7fa27c9a, 0x307196ec), + SECP256K1_FE_CONST(0x68193a6c, 0x9a8b87a7, 0x2a760c64, 0x13e473f6, 0x23ae7bed, 0x1de05422, 0x88865427, 0xa3418265)}, + {SECP256K1_FE_CONST(0xa40b2079, 0xb8f88e89, 0xa7617997, 0x89baf5ae, 0x174df343, 0x75138eae, 0x2711595d, 0x3fc3e66c), + SECP256K1_FE_CONST(0x9f99c6a5, 0x6d685267, 0xd4b87c37, 0x9d9c4576, 0x358c692b, 0x6bbae0ed, 0x3389c93d, 0x7fdd2655)}, + {SECP256K1_FE_CONST(0x7c74c6b6, 0xe98d9151, 0x72645cf1, 0x7f06e321, 0xcefee074, 0x15b2113a, 0x10a9be07, 0x08a45696), + SECP256K1_FE_CONST(0x8c919a88, 0x898bc1e0, 0x77f26f97, 0x12e655b7, 0x9ba0ac40, 0xe15bb19e, 0x8364cc3b, 0xe227a8ee)}, + {SECP256K1_FE_CONST(0x109ba1ce, 0xdafa6d4a, 0xa1cec2b2, 0xeb1069f4, 0xb7a79e5b, 0xec6eb99b, 0xaec5f643, 0xee0e723e), + SECP256K1_FE_CONST(0x93d13eb8, 0x4bb0bcf9, 0xe64f5a71, 0xdbe9f359, 0x7191401c, 0x6f057a4a, 0xa407fe1b, 0x7ecb65cc)}, + {SECP256K1_FE_CONST(0x3db076cd, 0xec74a5c9, 0xf61dd138, 0x90e23e06, 0xeeedd2d0, 0x74cbc4e0, 0x3dbe1e91, 0xded36a78), + SECP256K1_FE_CONST(0x3f07f966, 0x8e2a1e09, 0x706c71df, 0x02b5e9d5, 0xcb92ddbf, 0xcdd53010, 0x16545564, 0xe660b107)}, + {SECP256K1_FE_CONST(0xe31c73ed, 0xb4c4b82c, 0x02ae35f7, 0x4cdec153, 0x98b522fd, 0xf7d2460c, 0x6bf7c0f8, 0x4cf67b0d), + SECP256K1_FE_CONST(0x4b8f1faf, 0x94e8b070, 0x19af0ff6, 0xa319cd31, 0xdf0a7ffb, 0xefaba629, 0x59c50666, 0x1fe5b843)}, + {SECP256K1_FE_CONST(0x4c8b0e6e, 0x83392ab6, 0xc0e3e9f1, 0xbbd85497, 0x16698897, 
0xf552d50d, 0x79652ddb, 0x12f99870), + SECP256K1_FE_CONST(0x56d5101f, 0xd23b7949, 0x17dc38d6, 0xf24022ef, 0xcf18e70a, 0x5cc34424, 0x438544c3, 0x62da4bca)}, + {SECP256K1_FE_CONST(0xb0e040e2, 0x40cc35da, 0x7dd5c611, 0x7fccb178, 0x28888137, 0xbc930358, 0xea2cbc90, 0x775417dc), + SECP256K1_FE_CONST(0xca37f0d4, 0x016dd7c8, 0xab3ae576, 0x96e08d69, 0x68ed9155, 0xa9b44270, 0x900ae35d, 0x7c7800cd)}, + {SECP256K1_FE_CONST(0x8a32ea49, 0x7fbb0bae, 0x69724a9d, 0x8e2105b2, 0xbdf69178, 0x862577ef, 0x35055590, 0x667ddaef), + SECP256K1_FE_CONST(0xd02d7ead, 0xc5e190f0, 0x559c9d72, 0xdaef1ffc, 0x64f9f425, 0xf43645ea, 0x7341e08d, 0x11768e96)}, + {SECP256K1_FE_CONST(0xa3592d98, 0x9abe289d, 0x579ebea6, 0xbb0857a8, 0xe242ab73, 0x85f9a2ce, 0xb6998f0f, 0xbfffbfc6), + SECP256K1_FE_CONST(0x093c1533, 0x32032efa, 0x6aa46070, 0x0039599e, 0x589c35f4, 0xff525430, 0x7fe3777a, 0x44b43ddc)}, + {SECP256K1_FE_CONST(0x647178a3, 0x229e607b, 0xcc98521a, 0xcce3fdd9, 0x1e1bc9c9, 0x97fb7c6a, 0x61b961e0, 0x99b10709), + SECP256K1_FE_CONST(0x98217c13, 0xd51ddf78, 0x96310e77, 0xdaebd908, 0x602ca683, 0xcb46d07a, 0xa1fcf17e, 0xc8e2feb3)}, + {SECP256K1_FE_CONST(0x7334627c, 0x73f98968, 0x99464b4b, 0xf5964958, 0x1b95870d, 0xc658227e, 0x5e3235d8, 0xdcab5787), + SECP256K1_FE_CONST(0x000006fd, 0xc7e9dd94, 0x40ae367a, 0xe51d495c, 0x07603b9b, 0x2d088418, 0x6cc5c74c, 0x98514307)}, + {SECP256K1_FE_CONST(0x82e83876, 0x96c28938, 0xa50dd1c5, 0x605c3ad1, 0xc048637d, 0x7a50825f, 0x335ed01a, 0x00005760), + SECP256K1_FE_CONST(0xb0393f9f, 0x9f2aa55e, 0xf5607e2e, 0x5287d961, 0x60b3e704, 0xf3e16e80, 0xb4f9a3ea, 0xfec7f02d)}, + {SECP256K1_FE_CONST(0xc97b6cec, 0x3ee6b8dc, 0x98d24b58, 0x3c1970a1, 0xfe06297a, 0xae813529, 0xe76bb6bd, 0x771ae51d), + SECP256K1_FE_CONST(0x0507c702, 0xd407d097, 0x47ddeb06, 0xf6625419, 0x79f48f79, 0x7bf80d0b, 0xfc34b364, 0x253a5db1)}, + {SECP256K1_FE_CONST(0xd559af63, 0x77ea9bc4, 0x3cf1ad14, 0x5c7a4bbb, 0x10e7d18b, 0x7ce0dfac, 0x380bb19d, 0x0bb99bd3), + SECP256K1_FE_CONST(0x00196119, 0xb9b00d92, 0x34edfdb5, 
0xbbdc42fc, 0xd2daa33a, 0x163356ca, 0xaa8754c8, 0xb0ec8b0b)}, + {SECP256K1_FE_CONST(0x8ddfa3dc, 0x52918da0, 0x640519dc, 0x0af8512a, 0xca2d33b2, 0xbde52514, 0xda9c0afc, 0xcb29fce4), + SECP256K1_FE_CONST(0xb3e4878d, 0x5cb69148, 0xcd54388b, 0xc23acce0, 0x62518ba8, 0xf09def92, 0x7b31e6aa, 0x6ba35b02)}, + {SECP256K1_FE_CONST(0xf8207492, 0xe3049f0a, 0x65285f2b, 0x0bfff996, 0x00ca112e, 0xc05da837, 0x546d41f9, 0x5194fb91), + SECP256K1_FE_CONST(0x7b7ee50b, 0xa8ed4bbd, 0xf6469930, 0x81419a5c, 0x071441c7, 0x290d046e, 0x3b82ea41, 0x611c5f95)}, + {SECP256K1_FE_CONST(0x050f7c80, 0x5bcd3c6b, 0x823cb724, 0x5ce74db7, 0xa4e39f5c, 0xbd8828d7, 0xfd4d3e07, 0x3ec2926a), + SECP256K1_FE_CONST(0x000d6730, 0xb0171314, 0x4764053d, 0xee157117, 0x48fd61da, 0xdea0b9db, 0x1d5e91c6, 0xbdc3f59e)}, + {SECP256K1_FE_CONST(0x3e3ea8eb, 0x05d760cf, 0x23009263, 0xb3cb3ac9, 0x088f6f0d, 0x3fc182a3, 0xbd57087c, 0xe67c62f9), + SECP256K1_FE_CONST(0xbe988716, 0xa29c1bf6, 0x4456aed6, 0xab1e4720, 0x49929305, 0x51043bf4, 0xebd833dd, 0xdd511e8b)}, + {SECP256K1_FE_CONST(0x6964d2a9, 0xa7fa6501, 0xa5959249, 0x142f4029, 0xea0c1b5f, 0x2f487ef6, 0x301ac80a, 0x768be5cd), + SECP256K1_FE_CONST(0x3918ffe4, 0x07492543, 0xed24d0b7, 0x3df95f8f, 0xaffd7cb4, 0x0de2191c, 0x9ec2f2ad, 0x2c0cb3c6)}, + {SECP256K1_FE_CONST(0x37c93520, 0xf6ddca57, 0x2b42fd5e, 0xb5c7e4de, 0x11b5b81c, 0xb95e91f3, 0x95c4d156, 0x39877ccb), + SECP256K1_FE_CONST(0x9a94b9b5, 0x57eb71ee, 0x4c975b8b, 0xac5262a8, 0x077b0595, 0xe12a6b1f, 0xd728edef, 0x1a6bf956)} + }; + /* Fixed test cases for scalar inverses: pairs of (x, 1/x) mod n. 
*/ + static const rustsecp256k1zkp_v0_4_0_scalar scalar_cases[][2] = { + /* 0 */ + {SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0), + SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0)}, + /* 1 */ + {SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1), + SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1)}, + /* -1 */ + {SECP256K1_SCALAR_CONST(0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0xbaaedce6, 0xaf48a03b, 0xbfd25e8c, 0xd0364140), + SECP256K1_SCALAR_CONST(0xffffffff, 0xffffffff, 0xffffffff, 0xfffffffe, 0xbaaedce6, 0xaf48a03b, 0xbfd25e8c, 0xd0364140)}, + /* 2 */ + {SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 2), + SECP256K1_SCALAR_CONST(0x7fffffff, 0xffffffff, 0xffffffff, 0xffffffff, 0x5d576e73, 0x57a4501d, 0xdfe92f46, 0x681b20a1)}, + /* 2**128 */ + {SECP256K1_SCALAR_CONST(0, 0, 0, 1, 0, 0, 0, 0), + SECP256K1_SCALAR_CONST(0x50a51ac8, 0x34b9ec24, 0x4b0dff66, 0x5588b13e, 0x9984d5b3, 0xcf80ef0f, 0xd6a23766, 0xa3ee9f22)}, + /* Input known to need 635 divsteps */ + {SECP256K1_SCALAR_CONST(0xcb9f1d35, 0xdd4416c2, 0xcd71bf3f, 0x6365da66, 0x3c9b3376, 0x8feb7ae9, 0x32a5ef60, 0x19199ec3), + SECP256K1_SCALAR_CONST(0x1d7c7bba, 0xf1893d53, 0xb834bd09, 0x36b411dc, 0x42c2e42f, 0xec72c428, 0x5e189791, 0x8e9bc708)}, + /* Input known to need 566 divsteps starting with delta=1/2. */ + {SECP256K1_SCALAR_CONST(0x7e3c993d, 0xa4272488, 0xbc015b49, 0x2db54174, 0xd382083a, 0xebe6db35, 0x80f82eff, 0xcd132c72), + SECP256K1_SCALAR_CONST(0x086f34a0, 0x3e631f76, 0x77418f28, 0xcc84ac95, 0x6304439d, 0x365db268, 0x312c6ded, 0xd0b934f8)}, + /* Input known to need 565 divsteps starting with delta=1/2. */ + {SECP256K1_SCALAR_CONST(0xbad7e587, 0x3f307859, 0x60d93147, 0x8a18491e, 0xb38a9fd5, 0x254350d3, 0x4b1f0e4b, 0x7dd6edc4), + SECP256K1_SCALAR_CONST(0x89f2df26, 0x39e2b041, 0xf19bd876, 0xd039c8ac, 0xc2223add, 0x29c4943e, 0x6632d908, 0x515f467b)}, + /* Selection of randomly generated inputs that reach low/high d/e values in various configurations. 
*/ + {SECP256K1_SCALAR_CONST(0x1950d757, 0xb37a5809, 0x435059bb, 0x0bb8997e, 0x07e1e3c8, 0x5e5d7d2c, 0x6a0ed8e3, 0xdbde180e), + SECP256K1_SCALAR_CONST(0xbf72af9b, 0x750309e2, 0x8dda230b, 0xfe432b93, 0x7e25e475, 0x4388251e, 0x633d894b, 0x3bcb6f8c)}, + {SECP256K1_SCALAR_CONST(0x9bccf4e7, 0xc5a515e3, 0x50637aa9, 0xbb65a13f, 0x391749a1, 0x62de7d4e, 0xf6d7eabb, 0x3cd10ce0), + SECP256K1_SCALAR_CONST(0xaf2d5623, 0xb6385a33, 0xcd0365be, 0x5e92a70d, 0x7f09179c, 0x3baaf30f, 0x8f9cc83b, 0x20092f67)}, + {SECP256K1_SCALAR_CONST(0x73a57111, 0xb242952a, 0x5c5dee59, 0xf3be2ace, 0xa30a7659, 0xa46e5f47, 0xd21267b1, 0x39e642c9), + SECP256K1_SCALAR_CONST(0xa711df07, 0xcbcf13ef, 0xd61cc6be, 0xbcd058ce, 0xb02cf157, 0x272d4a18, 0x86d0feb3, 0xcd5fa004)}, + {SECP256K1_SCALAR_CONST(0x04884963, 0xce0580b1, 0xba547030, 0x3c691db3, 0x9cd2c84f, 0x24c7cebd, 0x97ebfdba, 0x3e785ec2), + SECP256K1_SCALAR_CONST(0xaaaaaf14, 0xd7c99ba7, 0x517ce2c1, 0x78a28b4c, 0x3769a851, 0xe5c5a03d, 0x4cc28f33, 0x0ec4dc5d)}, + {SECP256K1_SCALAR_CONST(0x1679ed49, 0x21f537b1, 0x815cb8ae, 0x9efc511c, 0x5b9fa037, 0x0b0f275e, 0x6c985281, 0x6c4a9905), + SECP256K1_SCALAR_CONST(0xb14ac3d5, 0x62b52999, 0xef34ead1, 0xffca4998, 0x0294341a, 0x1f8172aa, 0xea1624f9, 0x302eea62)}, + {SECP256K1_SCALAR_CONST(0x626b37c0, 0xf0057c35, 0xee982f83, 0x452a1fd3, 0xea826506, 0x48b08a9d, 0x1d2c4799, 0x4ad5f6ec), + SECP256K1_SCALAR_CONST(0xe38643b7, 0x567bfc2f, 0x5d2f1c15, 0xe327239c, 0x07112443, 0x69509283, 0xfd98e77a, 0xdb71c1e8)}, + {SECP256K1_SCALAR_CONST(0x1850a3a7, 0x759efc56, 0x54f287b2, 0x14d1234b, 0xe263bbc9, 0xcf4d8927, 0xd5f85f27, 0x965bd816), + SECP256K1_SCALAR_CONST(0x3b071831, 0xcac9619a, 0xcceb0596, 0xf614d63b, 0x95d0db2f, 0xc6a00901, 0x8eaa2621, 0xabfa0009)}, + {SECP256K1_SCALAR_CONST(0x94ae5d06, 0xa27dc400, 0x487d72be, 0xaa51ebed, 0xe475b5c0, 0xea675ffc, 0xf4df627a, 0xdca4222f), + SECP256K1_SCALAR_CONST(0x01b412ed, 0xd7830956, 0x1532537e, 0xe5e3dc99, 0x8fd3930a, 0x54f8d067, 0x32ef5760, 0x594438a5)}, + 
{SECP256K1_SCALAR_CONST(0x1f24278a, 0xb5bfe374, 0xa328dbbc, 0xebe35f48, 0x6620e009, 0xd58bb1b4, 0xb5a6bf84, 0x8815f63a), + SECP256K1_SCALAR_CONST(0xfe928416, 0xca5ba2d3, 0xfde513da, 0x903a60c7, 0x9e58ad8a, 0x8783bee4, 0x083a3843, 0xa608c914)}, + {SECP256K1_SCALAR_CONST(0xdc107d58, 0x274f6330, 0x67dba8bc, 0x26093111, 0x5201dfb8, 0x968ce3f5, 0xf34d1bd4, 0xf2146504), + SECP256K1_SCALAR_CONST(0x660cfa90, 0x13c3d93e, 0x7023b1e5, 0xedd09e71, 0x6d9c9d10, 0x7a3d2cdb, 0xdd08edc3, 0xaa78fcfb)}, + {SECP256K1_SCALAR_CONST(0x7cd1e905, 0xc6f02776, 0x2f551cc7, 0x5da61cff, 0x7da05389, 0x1119d5a4, 0x631c7442, 0x894fd4f7), + SECP256K1_SCALAR_CONST(0xff20862a, 0x9d3b1a37, 0x1628803b, 0x3004ccae, 0xaa23282a, 0xa89a1109, 0xd94ece5e, 0x181bdc46)}, + {SECP256K1_SCALAR_CONST(0x5b9dade8, 0x23d26c58, 0xcd12d818, 0x25b8ae97, 0x3dea04af, 0xf482c96b, 0xa062f254, 0x9e453640), + SECP256K1_SCALAR_CONST(0x50c38800, 0x15fa53f4, 0xbe1e5392, 0x5c9b120a, 0x262c22c7, 0x18fa0816, 0x5f2baab4, 0x8cb5db46)}, + {SECP256K1_SCALAR_CONST(0x11cdaeda, 0x969c464b, 0xef1f4ab0, 0x5b01d22e, 0x656fd098, 0x882bea84, 0x65cdbe7a, 0x0c19ff03), + SECP256K1_SCALAR_CONST(0x1968d0fa, 0xac46f103, 0xb55f1f72, 0xb3820bed, 0xec6b359a, 0x4b1ae0ad, 0x7e38e1fb, 0x295ccdfb)}, + {SECP256K1_SCALAR_CONST(0x2c351aa1, 0x26e91589, 0x194f8a1e, 0x06561f66, 0x0cb97b7f, 0x10914454, 0x134d1c03, 0x157266b4), + SECP256K1_SCALAR_CONST(0xbe49ada6, 0x92bd8711, 0x41b176c4, 0xa478ba95, 0x14883434, 0x9d1cd6f3, 0xcc4b847d, 0x22af80f5)}, + {SECP256K1_SCALAR_CONST(0x6ba07c6e, 0x13a60edb, 0x6247f5c3, 0x84b5fa56, 0x76fe3ec5, 0x80426395, 0xf65ec2ae, 0x623ba730), + SECP256K1_SCALAR_CONST(0x25ac23f7, 0x418cd747, 0x98376f9d, 0x4a11c7bf, 0x24c8ebfe, 0x4c8a8655, 0x345f4f52, 0x1c515595)}, + {SECP256K1_SCALAR_CONST(0x9397a712, 0x8abb6951, 0x2d4a3d54, 0x703b1c2a, 0x0661dca8, 0xd75c9b31, 0xaed4d24b, 0xd2ab2948), + SECP256K1_SCALAR_CONST(0xc52e8bef, 0xd55ce3eb, 0x1c897739, 0xeb9fb606, 0x36b9cd57, 0x18c51cc2, 0x6a87489e, 0xffd0dcf3)}, + 
{SECP256K1_SCALAR_CONST(0xe6a808cc, 0xeb437888, 0xe97798df, 0x4e224e44, 0x7e3b380a, 0x207c1653, 0x889f3212, 0xc6738b6f), + SECP256K1_SCALAR_CONST(0x31f9ae13, 0xd1e08b20, 0x757a2e5e, 0x5243a0eb, 0x8ae35f73, 0x19bb6122, 0xb910f26b, 0xda70aa55)}, + {SECP256K1_SCALAR_CONST(0xd0320548, 0xab0effe7, 0xa70779e0, 0x61a347a6, 0xb8c1e010, 0x9d5281f8, 0x2ee588a6, 0x80000000), + SECP256K1_SCALAR_CONST(0x1541897e, 0x78195c90, 0x7583dd9e, 0x728b6100, 0xbce8bc6d, 0x7a53b471, 0x5dcd9e45, 0x4425fcaf)}, + {SECP256K1_SCALAR_CONST(0x93d623f1, 0xd45b50b0, 0x796e9186, 0x9eac9407, 0xd30edc20, 0xef6304cf, 0x250494e7, 0xba503de9), + SECP256K1_SCALAR_CONST(0x7026d638, 0x1178b548, 0x92043952, 0x3c7fb47c, 0xcd3ea236, 0x31d82b01, 0x612fc387, 0x80b9b957)}, + {SECP256K1_SCALAR_CONST(0xf860ab39, 0x55f5d412, 0xa4d73bcc, 0x3b48bd90, 0xc248ffd3, 0x13ca10be, 0x8fba84cc, 0xdd28d6a3), + SECP256K1_SCALAR_CONST(0x5c32fc70, 0xe0b15d67, 0x76694700, 0xfe62be4d, 0xeacdb229, 0x7a4433d9, 0x52155cd0, 0x7649ab59)}, + {SECP256K1_SCALAR_CONST(0x4e41311c, 0x0800af58, 0x7a690a8e, 0xe175c9ba, 0x6981ab73, 0xac532ea8, 0x5c1f5e63, 0x6ac1f189), + SECP256K1_SCALAR_CONST(0xfffffff9, 0xd075982c, 0x7fbd3825, 0xc05038a2, 0x4533b91f, 0x94ec5f45, 0xb280b28f, 0x842324dc)}, + {SECP256K1_SCALAR_CONST(0x48e473bf, 0x3555eade, 0xad5d7089, 0x2424c4e4, 0x0a99397c, 0x2dc796d8, 0xb7a43a69, 0xd0364141), + SECP256K1_SCALAR_CONST(0x634976b2, 0xa0e47895, 0x1ec38593, 0x266d6fd0, 0x6f602644, 0x9bb762f1, 0x7180c704, 0xe23a4daa)}, + {SECP256K1_SCALAR_CONST(0xbe83878d, 0x3292fc54, 0x26e71c62, 0x556ccedc, 0x7cbb8810, 0x4032a720, 0x34ead589, 0xe4d6bd13), + SECP256K1_SCALAR_CONST(0x6cd150ad, 0x25e59d0f, 0x74cbae3d, 0x6377534a, 0x1e6562e8, 0xb71b9d18, 0xe1e5d712, 0x8480abb3)}, + {SECP256K1_SCALAR_CONST(0xcdddf2e5, 0xefc15f88, 0xc9ee06de, 0x8a846ca9, 0x28561581, 0x68daa5fb, 0xd1cf3451, 0xeb1782d0), + SECP256K1_SCALAR_CONST(0xffffffd9, 0xed8d2af4, 0x993c865a, 0x23e9681a, 0x3ca3a3dc, 0xe6d5a46e, 0xbd86bd87, 0x61b55c70)}, + 
{SECP256K1_SCALAR_CONST(0xb6a18f1f, 0x04872df9, 0x08165ec4, 0x319ca19c, 0x6c0359ab, 0x1f7118fb, 0xc2ef8082, 0xca8b7785), + SECP256K1_SCALAR_CONST(0xff55b19b, 0x0f1ac78c, 0x0f0c88c2, 0x2358d5ad, 0x5f455e4e, 0x3330b72f, 0x274dc153, 0xffbf272b)}, + {SECP256K1_SCALAR_CONST(0xea4898e5, 0x30eba3e8, 0xcf0e5c3d, 0x06ec6844, 0x01e26fb6, 0x75636225, 0xc5d08f4c, 0x1decafa0), + SECP256K1_SCALAR_CONST(0xe5a014a8, 0xe3c4ec1e, 0xea4f9b32, 0xcfc7b386, 0x00630806, 0x12c08d02, 0x6407ccc2, 0xb067d90e)}, + {SECP256K1_SCALAR_CONST(0x70e9aea9, 0x7e933af0, 0x8a23bfab, 0x23e4b772, 0xff951863, 0x5ffcf47d, 0x6bebc918, 0x2ca58265), + SECP256K1_SCALAR_CONST(0xf4e00006, 0x81bc6441, 0x4eb6ec02, 0xc194a859, 0x80ad7c48, 0xba4e9afb, 0x8b6bdbe0, 0x989d8f77)}, + {SECP256K1_SCALAR_CONST(0x3c56c774, 0x46efe6f0, 0xe93618b8, 0xf9b5a846, 0xd247df61, 0x83b1e215, 0x06dc8bcc, 0xeefc1bf5), + SECP256K1_SCALAR_CONST(0xfff8937a, 0x2cd9586b, 0x43c25e57, 0xd1cefa7a, 0x9fb91ed3, 0x95b6533d, 0x8ad0de5b, 0xafb93f00)}, + {SECP256K1_SCALAR_CONST(0xfb5c2772, 0x5cb30e83, 0xe38264df, 0xe4e3ebf3, 0x392aa92e, 0xa68756a1, 0x51279ac5, 0xb50711a8), + SECP256K1_SCALAR_CONST(0x000013af, 0x1105bfe7, 0xa6bbd7fb, 0x3d638f99, 0x3b266b02, 0x072fb8bc, 0x39251130, 0x2e0fd0ea)} + }; + int i, var, testrand; + unsigned char b32[32]; + rustsecp256k1zkp_v0_4_0_fe x_fe; + rustsecp256k1zkp_v0_4_0_scalar x_scalar; + memset(b32, 0, sizeof(b32)); + /* Test fixed test cases through test_inverse_{scalar,field}, both ways. 
*/ + for (i = 0; (size_t)i < sizeof(fe_cases)/sizeof(fe_cases[0]); ++i) { + for (var = 0; var <= 1; ++var) { + test_inverse_field(&x_fe, &fe_cases[i][0], var); + check_fe_equal(&x_fe, &fe_cases[i][1]); + test_inverse_field(&x_fe, &fe_cases[i][1], var); + check_fe_equal(&x_fe, &fe_cases[i][0]); + } + } + for (i = 0; (size_t)i < sizeof(scalar_cases)/sizeof(scalar_cases[0]); ++i) { + for (var = 0; var <= 1; ++var) { + test_inverse_scalar(&x_scalar, &scalar_cases[i][0], var); + CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&x_scalar, &scalar_cases[i][1])); + test_inverse_scalar(&x_scalar, &scalar_cases[i][1], var); + CHECK(rustsecp256k1zkp_v0_4_0_scalar_eq(&x_scalar, &scalar_cases[i][0])); + } + } + /* Test inputs 0..999 and their respective negations. */ + for (i = 0; i < 1000; ++i) { + b32[31] = i & 0xff; + b32[30] = (i >> 8) & 0xff; + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&x_scalar, b32, NULL); + rustsecp256k1zkp_v0_4_0_fe_set_b32(&x_fe, b32); + for (var = 0; var <= 1; ++var) { + test_inverse_scalar(NULL, &x_scalar, var); + test_inverse_field(NULL, &x_fe, var); + } + rustsecp256k1zkp_v0_4_0_scalar_negate(&x_scalar, &x_scalar); + rustsecp256k1zkp_v0_4_0_fe_negate(&x_fe, &x_fe, 1); + for (var = 0; var <= 1; ++var) { + test_inverse_scalar(NULL, &x_scalar, var); + test_inverse_field(NULL, &x_fe, var); + } + } + /* test 128*count random inputs; half with testrand256_test, half with testrand256 */ + for (testrand = 0; testrand <= 1; ++testrand) { + for (i = 0; i < 64 * count; ++i) { + (testrand ? 
rustsecp256k1zkp_v0_4_0_testrand256_test : rustsecp256k1zkp_v0_4_0_testrand256)(b32); + rustsecp256k1zkp_v0_4_0_scalar_set_b32(&x_scalar, b32, NULL); + rustsecp256k1zkp_v0_4_0_fe_set_b32(&x_fe, b32); + for (var = 0; var <= 1; ++var) { + test_inverse_scalar(NULL, &x_scalar, var); + test_inverse_field(NULL, &x_fe, var); + } + } + } +} + /***** GROUP TESTS *****/ void ge_equals_ge(const rustsecp256k1zkp_v0_4_0_ge *a, const rustsecp256k1zkp_v0_4_0_ge *b) { @@ -2401,20 +3353,34 @@ void test_ge(void) { /* Test batch gej -> ge conversion with many infinities. */ for (i = 0; i < 4 * runs + 1; i++) { + int odd; random_group_element_test(&ge[i]); + odd = rustsecp256k1zkp_v0_4_0_fe_is_odd(&ge[i].x); + CHECK(odd == 0 || odd == 1); /* randomly set half the points to infinity */ - if(rustsecp256k1zkp_v0_4_0_fe_is_odd(&ge[i].x)) { + if (odd == i % 2) { rustsecp256k1zkp_v0_4_0_ge_set_infinity(&ge[i]); } rustsecp256k1zkp_v0_4_0_gej_set_ge(&gej[i], &ge[i]); } - /* batch invert */ + /* batch convert */ rustsecp256k1zkp_v0_4_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); /* check result */ for (i = 0; i < 4 * runs + 1; i++) { ge_equals_gej(&ge[i], &gej[i]); } + /* Test batch gej -> ge conversion with all infinities. */ + for (i = 0; i < 4 * runs + 1; i++) { + rustsecp256k1zkp_v0_4_0_gej_set_infinity(&gej[i]); + } + /* batch convert */ + rustsecp256k1zkp_v0_4_0_ge_set_all_gej_var(ge, gej, 4 * runs + 1); + /* check result */ + for (i = 0; i < 4 * runs + 1; i++) { + CHECK(rustsecp256k1zkp_v0_4_0_ge_is_infinity(&ge[i])); + } + free(ge); free(gej); } @@ -4557,8 +5523,10 @@ void test_ecdsa_sign_verify(void) { rustsecp256k1zkp_v0_4_0_scalar one; rustsecp256k1zkp_v0_4_0_scalar msg, key; rustsecp256k1zkp_v0_4_0_scalar sigr, sigs; - int recid; int getrec; + /* Initialize recid to suppress a false positive -Wconditional-uninitialized in clang. + VG_UNDEF ensures that valgrind will still treat the variable as uninitialized. 
*/ + int recid = -1; VG_UNDEF(&recid, sizeof(recid)); random_scalar_order_test(&msg); random_scalar_order_test(&key); rustsecp256k1zkp_v0_4_0_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key); @@ -4838,6 +5806,55 @@ void test_random_pubkeys(void) { } } +void run_pubkey_comparison(void) { + unsigned char pk1_ser[33] = { + 0x02, + 0x58, 0x84, 0xb3, 0xa2, 0x4b, 0x97, 0x37, 0x88, 0x92, 0x38, 0xa6, 0x26, 0x62, 0x52, 0x35, 0x11, + 0xd0, 0x9a, 0xa1, 0x1b, 0x80, 0x0b, 0x5e, 0x93, 0x80, 0x26, 0x11, 0xef, 0x67, 0x4b, 0xd9, 0x23 + }; + const unsigned char pk2_ser[33] = { + 0x02, + 0xde, 0x36, 0x0e, 0x87, 0x59, 0x8f, 0x3c, 0x01, 0x36, 0x2a, 0x2a, 0xb8, 0xc6, 0xf4, 0x5e, 0x4d, + 0xb2, 0xc2, 0xd5, 0x03, 0xa7, 0xf9, 0xf1, 0x4f, 0xa8, 0xfa, 0x95, 0xa8, 0xe9, 0x69, 0x76, 0x1c + }; + rustsecp256k1zkp_v0_4_0_pubkey pk1; + rustsecp256k1zkp_v0_4_0_pubkey pk2; + int32_t ecount = 0; + + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_parse(ctx, &pk1, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_parse(ctx, &pk2, pk2_ser, sizeof(pk2_ser)) == 1); + + rustsecp256k1zkp_v0_4_0_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, NULL, &pk2) < 0); + CHECK(ecount == 1); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk1, NULL) > 0); + CHECK(ecount == 2); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk1, &pk1) == 0); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk2, &pk2) == 0); + CHECK(ecount == 2); + { + rustsecp256k1zkp_v0_4_0_pubkey pk_tmp; + memset(&pk_tmp, 0, sizeof(pk_tmp)); /* illegal pubkey */ + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk_tmp, &pk2) < 0); + CHECK(ecount == 3); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk_tmp, &pk_tmp) == 0); + CHECK(ecount == 5); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk2, 
&pk_tmp) > 0); + CHECK(ecount == 6); + } + + rustsecp256k1zkp_v0_4_0_context_set_illegal_callback(ctx, NULL, NULL); + + /* Make pk2 the same as pk1 but with 3 rather than 2. Note that in + * an uncompressed encoding, these would have the opposite ordering */ + pk1_ser[0] = 3; + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_parse(ctx, &pk2, pk1_ser, sizeof(pk1_ser)) == 1); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk1, &pk2) < 0); + CHECK(rustsecp256k1zkp_v0_4_0_ec_pubkey_cmp(ctx, &pk2, &pk1) > 0); +} + void run_random_pubkeys(void) { int i; for (i = 0; i < 10*count; i++) { @@ -5840,7 +6857,7 @@ int main(int argc, char **argv) { count = strtol(argv[1], NULL, 0); } else { const char* env = getenv("SECP256K1_TEST_ITERS"); - if (env) { + if (env && strlen(env) > 0) { count = strtol(env, NULL, 0); } } @@ -5868,23 +6885,22 @@ int main(int argc, char **argv) { run_rand_int(); run_util_tests(); + run_ctz_tests(); + run_modinv_tests(); + run_inverse_tests(); + run_sha256_tests(); run_hmac_sha256_tests(); run_rfc6979_hmac_sha256_tests(); - -#ifndef USE_NUM_NONE - /* num tests */ - run_num_smalltests(); -#endif + run_tagged_sha256_tests(); /* scalar tests */ run_scalar_tests(); /* field tests */ - run_field_inv(); - run_field_inv_var(); run_field_misc(); run_field_convert(); + run_fe_mul(); run_sqr(); run_sqrt(); @@ -5926,6 +6942,7 @@ int main(int argc, char **argv) { #endif /* ecdsa tests */ + run_pubkey_comparison(); run_random_pubkeys(); run_ecdsa_der_parse(); run_ecdsa_sign_verify(); diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/tests_exhaustive.c b/secp256k1-zkp-sys/depend/secp256k1/src/tests_exhaustive.c index 0188ccf0..55814059 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/tests_exhaustive.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/tests_exhaustive.c @@ -10,7 +10,6 @@ #include #include - #include #undef USE_ECMULT_STATIC_PRECOMPUTATION @@ -20,10 +19,10 @@ #define EXHAUSTIVE_TEST_ORDER 13 #endif -#include "include/secp256k1.h" +#include "secp256k1.c" 
+#include "../include/secp256k1.h" #include "assumptions.h" #include "group.h" -#include "secp256k1.c" #include "testrand_impl.h" static int count = 2; @@ -303,6 +302,7 @@ void test_exhaustive_sign(const rustsecp256k1zkp_v0_4_0_context *ctx, const rust if (skip_section(&iter)) continue; for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */ const int starting_k = k; + int ret; rustsecp256k1zkp_v0_4_0_ecdsa_signature sig; rustsecp256k1zkp_v0_4_0_scalar sk, msg, r, s, expected_r; unsigned char sk32[32], msg32[32]; @@ -311,7 +311,8 @@ void test_exhaustive_sign(const rustsecp256k1zkp_v0_4_0_context *ctx, const rust rustsecp256k1zkp_v0_4_0_scalar_get_b32(sk32, &sk); rustsecp256k1zkp_v0_4_0_scalar_get_b32(msg32, &msg); - rustsecp256k1zkp_v0_4_0_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1zkp_v0_4_0_nonce_function_smallint, &k); + ret = rustsecp256k1zkp_v0_4_0_ecdsa_sign(ctx, &sig, msg32, sk32, rustsecp256k1zkp_v0_4_0_nonce_function_smallint, &k); + CHECK(ret == 1); rustsecp256k1zkp_v0_4_0_ecdsa_signature_load(ctx, &r, &s, &sig); /* Note that we compute expected_r *after* signing -- this is important diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/util.h b/secp256k1-zkp-sys/depend/secp256k1/src/util.h index 9aab30eb..daad6a6d 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/util.h +++ b/secp256k1-zkp-sys/depend/secp256k1/src/util.h @@ -286,4 +286,69 @@ SECP256K1_GNUC_EXT typedef __int128 int128_t; # endif #endif +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1zkp_v0_4_0_ctz32_var, but permits it to be tested separately. 
*/ +static SECP256K1_INLINE int rustsecp256k1zkp_v0_4_0_ctz32_var_debruijn(uint32_t x) { + static const uint8_t debruijn[32] = { + 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, + 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, + 0x1E, 0x11, 0x08, 0x0E, 0x1D, 0x0D, 0x1C, 0x1B + }; + return debruijn[((x & -x) * 0x04D7651F) >> 27]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1zkp_v0_4_0_ctz64_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1zkp_v0_4_0_ctz64_var_debruijn(uint64_t x) { + static const uint8_t debruijn[64] = { + 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, + 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, + 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, + 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12 + }; + return debruijn[((x & -x) * 0x022FDD63CC95386D) >> 58]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. */ +static SECP256K1_INLINE int rustsecp256k1zkp_v0_4_0_ctz32_var(uint32_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctz) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned type is sufficient to represent the largest uint32_t, consider __builtin_ctz. */ + if (((unsigned)UINT32_MAX) == UINT32_MAX) { + return __builtin_ctz(x); + } +#endif +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzl (the unsigned long type is always at least 32 bits). */ + return __builtin_ctzl(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ + return rustsecp256k1zkp_v0_4_0_ctz32_var_debruijn(x); +#endif +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. 
*/ +static SECP256K1_INLINE int rustsecp256k1zkp_v0_4_0_ctz64_var(uint64_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned long type is sufficient to represent the largest uint64_t, consider __builtin_ctzl. */ + if (((unsigned long)UINT64_MAX) == UINT64_MAX) { + return __builtin_ctzl(x); + } +#endif +#if (__has_builtin(__builtin_ctzll) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzll (the unsigned long long type is always at least 64 bits). */ + return __builtin_ctzll(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ + return rustsecp256k1zkp_v0_4_0_ctz64_var_debruijn(x); +#endif +} + #endif /* SECP256K1_UTIL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/util.h.orig b/secp256k1-zkp-sys/depend/secp256k1/src/util.h.orig index b6546251..be07b508 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/util.h.orig +++ b/secp256k1-zkp-sys/depend/secp256k1/src/util.h.orig @@ -302,4 +302,69 @@ SECP256K1_GNUC_EXT typedef __int128 int128_t; # endif #endif +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1zkp_v0_4_0_ctz32_var, but permits it to be tested separately. */ +static SECP256K1_INLINE int rustsecp256k1zkp_v0_4_0_ctz32_var_debruijn(uint32_t x) { + static const uint8_t debruijn[32] = { + 0x00, 0x01, 0x02, 0x18, 0x03, 0x13, 0x06, 0x19, 0x16, 0x04, 0x14, 0x0A, + 0x10, 0x07, 0x0C, 0x1A, 0x1F, 0x17, 0x12, 0x05, 0x15, 0x09, 0x0F, 0x0B, + 0x1E, 0x11, 0x08, 0x0E, 0x1D, 0x0D, 0x1C, 0x1B + }; + return debruijn[((x & -x) * 0x04D7651F) >> 27]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. + * This function is only intended to be used as fallback for + * rustsecp256k1zkp_v0_4_0_ctz64_var, but permits it to be tested separately. 
*/ +static SECP256K1_INLINE int rustsecp256k1zkp_v0_4_0_ctz64_var_debruijn(uint64_t x) { + static const uint8_t debruijn[64] = { + 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28, + 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11, + 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10, + 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12 + }; + return debruijn[((x & -x) * 0x022FDD63CC95386D) >> 58]; +} + +/* Determine the number of trailing zero bits in a (non-zero) 32-bit x. */ +static SECP256K1_INLINE int rustsecp256k1zkp_v0_4_0_ctz32_var(uint32_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctz) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned type is sufficient to represent the largest uint32_t, consider __builtin_ctz. */ + if (((unsigned)UINT32_MAX) == UINT32_MAX) { + return __builtin_ctz(x); + } +#endif +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzl (the unsigned long type is always at least 32 bits). */ + return __builtin_ctzl(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. */ + return rustsecp256k1zkp_v0_4_0_ctz32_var_debruijn(x); +#endif +} + +/* Determine the number of trailing zero bits in a (non-zero) 64-bit x. */ +static SECP256K1_INLINE int rustsecp256k1zkp_v0_4_0_ctz64_var(uint64_t x) { + VERIFY_CHECK(x != 0); +#if (__has_builtin(__builtin_ctzl) || SECP256K1_GNUC_PREREQ(3,4)) + /* If the unsigned long type is sufficient to represent the largest uint64_t, consider __builtin_ctzl. */ + if (((unsigned long)UINT64_MAX) == UINT64_MAX) { + return __builtin_ctzl(x); + } +#endif +#if (__has_builtin(__builtin_ctzll) || SECP256K1_GNUC_PREREQ(3,4)) + /* Otherwise consider __builtin_ctzll (the unsigned long long type is always at least 64 bits). */ + return __builtin_ctzll(x); +#else + /* If no suitable CTZ builtin is available, use a (variable time) software emulation. 
*/ + return rustsecp256k1zkp_v0_4_0_ctz64_var_debruijn(x); +#endif +} + #endif /* SECP256K1_UTIL_H */ diff --git a/secp256k1-zkp-sys/depend/secp256k1/src/valgrind_ctime_test.c b/secp256k1-zkp-sys/depend/secp256k1/src/valgrind_ctime_test.c index 3462ac5e..10836771 100644 --- a/secp256k1-zkp-sys/depend/secp256k1/src/valgrind_ctime_test.c +++ b/secp256k1-zkp-sys/depend/secp256k1/src/valgrind_ctime_test.c @@ -6,25 +6,26 @@ #include #include +#include -#include "include/secp256k1.h" +#include "../include/secp256k1.h" #include "assumptions.h" #include "util.h" #ifdef ENABLE_MODULE_ECDH -#include "include/secp256k1_ecdh.h" +#include "../include/secp256k1_ecdh.h" #endif #ifdef ENABLE_MODULE_RECOVERY -#include "include/secp256k1_recovery.h" +#include "../include/secp256k1_recovery.h" #endif #ifdef ENABLE_MODULE_EXTRAKEYS -#include "include/secp256k1_extrakeys.h" +#include "../include/secp256k1_extrakeys.h" #endif #ifdef ENABLE_MODULE_SCHNORRSIG -#include "include/secp256k1_schnorrsig.h" +#include "../include/secp256k1_schnorrsig.h" #endif #ifdef ENABLE_MODULE_ECDSA_S2C @@ -35,6 +36,10 @@ #include "include/secp256k1_ecdsa_adaptor.h" #endif +#ifdef ENABLE_MODULE_MUSIG +#include "include/secp256k1_musig.h" +#endif + void run_tests(rustsecp256k1zkp_v0_4_0_context *ctx, unsigned char *key); int main(void) { @@ -174,7 +179,7 @@ void run_tests(rustsecp256k1zkp_v0_4_0_context *ctx, unsigned char *key) { ret = rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &keypair, key); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); - ret = rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL, NULL); + ret = rustsecp256k1zkp_v0_4_0_schnorrsig_sign(ctx, sig, msg, &keypair, NULL); VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); CHECK(ret == 1); #endif @@ -241,4 +246,48 @@ void run_tests(rustsecp256k1zkp_v0_4_0_context *ctx, unsigned char *key) { CHECK(ret == 0); } #endif + +#ifdef ENABLE_MODULE_MUSIG + { + rustsecp256k1zkp_v0_4_0_xonly_pubkey pk; + const 
rustsecp256k1zkp_v0_4_0_xonly_pubkey *pk_ptr[1]; + rustsecp256k1zkp_v0_4_0_xonly_pubkey agg_pk; + unsigned char session_id[32]; + rustsecp256k1zkp_v0_4_0_musig_secnonce secnonce; + rustsecp256k1zkp_v0_4_0_musig_pubnonce pubnonce; + const rustsecp256k1zkp_v0_4_0_musig_pubnonce *pubnonce_ptr[1]; + rustsecp256k1zkp_v0_4_0_musig_aggnonce aggnonce; + rustsecp256k1zkp_v0_4_0_musig_keyagg_cache cache; + rustsecp256k1zkp_v0_4_0_musig_session session; + rustsecp256k1zkp_v0_4_0_musig_partial_sig partial_sig; + unsigned char extra_input[32]; + + pk_ptr[0] = &pk; + pubnonce_ptr[0] = &pubnonce; + VALGRIND_MAKE_MEM_DEFINED(key, 32); + memcpy(session_id, key, sizeof(session_id)); + session_id[0] = key[0] + 1; + memcpy(extra_input, session_id, sizeof(extra_input)); + extra_input[0] = session_id[0] + 1; + + CHECK(rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &keypair, key)); + CHECK(rustsecp256k1zkp_v0_4_0_keypair_xonly_pub(ctx, &pk, NULL, &keypair)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_pubkey_agg(ctx, NULL, &agg_pk, &cache, pk_ptr, 1) == 1); + VALGRIND_MAKE_MEM_UNDEFINED(key, 32); + VALGRIND_MAKE_MEM_UNDEFINED(session_id, 32); + VALGRIND_MAKE_MEM_UNDEFINED(extra_input, 32); + ret = rustsecp256k1zkp_v0_4_0_musig_nonce_gen(ctx, &secnonce, &pubnonce, session_id, key, msg, &cache, extra_input); + VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); + CHECK(ret == 1); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_agg(ctx, &aggnonce, pubnonce_ptr, 1)); + CHECK(rustsecp256k1zkp_v0_4_0_musig_nonce_process(ctx, &session, &aggnonce, msg, &cache, NULL) == 1); + + ret = rustsecp256k1zkp_v0_4_0_keypair_create(ctx, &keypair, key); + VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); + CHECK(ret == 1); + ret = rustsecp256k1zkp_v0_4_0_musig_partial_sign(ctx, &partial_sig, &secnonce, &keypair, &cache, &session); + VALGRIND_MAKE_MEM_DEFINED(&ret, sizeof(ret)); + CHECK(ret == 1); + } +#endif } diff --git a/secp256k1-zkp-sys/src/zkp.rs b/secp256k1-zkp-sys/src/zkp.rs index 88a50eb4..52da08f2 100644 --- 
a/secp256k1-zkp-sys/src/zkp.rs +++ b/secp256k1-zkp-sys/src/zkp.rs @@ -1,5 +1,6 @@ use core::{fmt, hash}; -use {types::*, Context, NonceFn, PublicKey, Signature}; +use {types::*, Context, KeyPair, NonceFn, PublicKey, Signature, XOnlyPublicKey}; +use {secp256k1_xonly_pubkey_from_pubkey}; /// Rangeproof maximum length pub const RANGEPROOF_MAX_LENGTH: size_t = 5134; @@ -349,6 +350,19 @@ extern "C" { input_len: size_t, ) -> c_int; + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_pubkey_agg" + )] + pub fn secp256k1_musig_pubkey_agg( + cx: *const Context, + scratch: *mut ScratchSpace, + combined_pk: *mut XOnlyPublicKey, + pre_session: *mut MusigKeyaggCache, + pubkeys: *const *const XOnlyPublicKey, + n_pubkeys: size_t, + ) -> c_int; + #[cfg_attr( not(feature = "external-symbols"), link_name = "rustsecp256k1zkp_v0_4_0_whitelist_signature_serialize" @@ -360,6 +374,17 @@ extern "C" { sig: *const WhitelistSignature, ) -> c_int; + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_pubkey_tweak_add" + )] + pub fn secp256k1_musig_pubkey_tweak_add( + cx: *const Context, + output_pubkey: *mut PublicKey, + tweak32: *const c_uchar, + keyagg_cache: *mut MusigKeyaggCache, + ) -> c_int; + #[cfg_attr( not(feature = "external-symbols"), link_name = "rustsecp256k1zkp_v0_4_0_whitelist_sign" @@ -378,6 +403,21 @@ extern "C" { noncedata: *mut c_void, ) -> c_int; + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_nonce_gen" + )] + pub fn secp256k1_musig_nonce_gen( + cx: *const Context, + secnonce: *mut MusigSecNonce, + pubnonce: *mut MusigPubNonce, + session_id32: *const c_uchar, + seckey: *const c_uchar, + msg32: *const c_uchar, + keyagg_cache: *const MusigKeyaggCache, + extra_input32: *const c_uchar, + ) -> c_int; + #[cfg_attr( not(feature = "external-symbols"), link_name = "rustsecp256k1zkp_v0_4_0_whitelist_verify" @@ -390,6 +430,161 @@ extern "C" { n_keys: 
size_t, sub_pubkey: *const PublicKey, ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_nonce_agg" + )] + pub fn secp256k1_musig_nonce_agg( + cx: *const Context, + aggnonce: *mut MusigAggNonce, + pubnonces: *const *const MusigPubNonce, + n_pubnonces: size_t, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_nonce_process" + )] + pub fn secp256k1_musig_nonce_process( + cx: *const Context, + session: *mut MusigSession, + aggnonce: *const MusigAggNonce, + msg32: *const c_uchar, + keyagg_cache: *const MusigKeyaggCache, + adaptor: *const PublicKey, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_pubnonce_serialize" + )] + pub fn secp256k1_musig_pubnonce_serialize( + cx: *const Context, + out32: *mut c_uchar, + nonce: *const MusigPubNonce, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_pubnonce_parse" + )] + pub fn secp256k1_musig_pubnonce_parse( + cx: *const Context, + nonce: *mut MusigPubNonce, + in32: *const c_uchar, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_aggnonce_serialize" + )] + pub fn secp256k1_musig_aggnonce_serialize( + cx: *const Context, + out32: *mut c_uchar, + nonce: *const MusigAggNonce, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_aggnonce_parse" + )] + pub fn secp256k1_musig_aggnonce_parse( + cx: *const Context, + nonce: *mut MusigAggNonce, + in32: *const c_uchar, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_partial_sig_serialize" + )] + pub fn secp256k1_musig_partial_sig_serialize( + cx: *const Context, + out32: *mut c_uchar, + sig: *const MusigPartialSignature, + ) -> c_int; + + #[cfg_attr( 
not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_partial_sig_parse" + )] + pub fn secp256k1_musig_partial_sig_parse( + cx: *const Context, + sig: *mut MusigPartialSignature, + in32: *const c_uchar, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_partial_sign" + )] + pub fn secp256k1_musig_partial_sign( + cx: *const Context, + partial_sig: *mut MusigPartialSignature, + secnonce: *mut MusigSecNonce, + keypair: *const KeyPair, + keyagg_cache: *const MusigKeyaggCache, + session: *const MusigSession, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_partial_sig_verify" + )] + pub fn secp256k1_musig_partial_sig_verify( + cx: *const Context, + partial_sig: *const MusigPartialSignature, + pubnonce: *const MusigPubNonce, + pubkey: *const XOnlyPublicKey, + keyagg_cache: *const MusigKeyaggCache, + session: *const MusigSession, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_partial_sig_agg" + )] + pub fn secp256k1_musig_partial_sig_agg( + cx: *const Context, + sig64: *mut c_uchar, + session: *const MusigSession, + partial_sigs: *const *const MusigPartialSignature, + n_sigs: size_t, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_nonce_parity" + )] + pub fn secp256k1_musig_nonce_parity( + cx: *const Context, + nonce_parity: *mut c_int, + session: *mut MusigSession, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_adapt" + )] + pub fn secp256k1_musig_adapt( + cx: *const Context, + sig64: *mut c_uchar, + sec_adaptor32: *const c_uchar, + nonce_parity: c_int, + ) -> c_int; + + #[cfg_attr( + not(feature = "external-symbols"), + link_name = "rustsecp256k1zkp_v0_4_0_musig_extract_adaptor" + )] + pub fn secp256k1_musig_extract_adaptor( + cx: 
*const Context, + sec_adaptor32: *mut c_uchar, + sig64: *const c_uchar, + pre_sig64: *const c_uchar, + nonce_parity: c_int, + ) -> c_int; } #[repr(C)] @@ -599,3 +794,94 @@ impl EcdsaAdaptorSignature { &self.0 } } + +#[repr(C)] +pub struct ScratchSpace(c_int); + +pub const MUSIG_KEYAGG_LEN: usize = 165; +pub const MUSIG_SECNONCE_LEN: usize = 68; +pub const MUSIG_PUBNONCE_LEN: usize = 132; +pub const MUSIG_AGGNONCE_LEN: usize = 132; +pub const MUSIG_SESSION_LEN: usize = 133; +pub const MUSIG_PART_SIG_LEN: usize = 36; + +#[repr(C)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub struct MusigKeyaggCache { + pub data: [c_uchar; MUSIG_KEYAGG_LEN], +} + +impl MusigKeyaggCache { + pub fn new() -> Self { + Self { data: [0; MUSIG_KEYAGG_LEN] } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub struct MusigSecNonce { + pub data: [c_uchar; MUSIG_SECNONCE_LEN], +} + +impl MusigSecNonce { + pub fn new() -> Self { + Self { data: [0; MUSIG_SECNONCE_LEN] } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub struct MusigPubNonce { + pub data: [c_uchar; MUSIG_PUBNONCE_LEN], +} + +impl MusigPubNonce { + pub fn new() -> Self { + Self { data: [0; MUSIG_PUBNONCE_LEN] } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub struct MusigAggNonce { + pub data: [c_uchar; MUSIG_AGGNONCE_LEN], +} + +impl MusigAggNonce { + pub fn new() -> Self { + Self { data: [0; MUSIG_AGGNONCE_LEN] } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub struct MusigSession { + pub data: [c_uchar; MUSIG_SESSION_LEN], +} + +impl MusigSession { + pub fn new() -> Self { + Self { data: [0; MUSIG_SESSION_LEN] } + } +} + +#[repr(C)] +#[derive(Debug, Copy, Clone, Eq, PartialEq)] +pub struct MusigPartialSignature { + pub data: [c_uchar; MUSIG_PART_SIG_LEN], +} + +impl MusigPartialSignature { + pub fn new() -> Self { + Self { data: [0; MUSIG_PART_SIG_LEN] } + } +} + +pub fn xonly_from_pubkey(cx: *const Context, pubkey: *const PublicKey) -> 
(XOnlyPublicKey, c_int) { + unsafe { + let mut xonly = XOnlyPublicKey::new(); + let mut parity = 0; + secp256k1_xonly_pubkey_from_pubkey(cx, &mut xonly, &mut parity, pubkey); + (xonly, parity) + } +} diff --git a/secp256k1-zkp-sys/vendor-libsecp.sh b/secp256k1-zkp-sys/vendor-libsecp.sh index 2bee54ba..0102fba3 100755 --- a/secp256k1-zkp-sys/vendor-libsecp.sh +++ b/secp256k1-zkp-sys/vendor-libsecp.sh @@ -26,7 +26,7 @@ done cd "$PARENT_DIR" || exit 1 rm -rf "$DIR" -git clone https://github.com/ElementsProject/secp256k1-zkp.git "$DIR" +git clone https://github.com/jonasnick/secp256k1-zkp.git "$DIR" cd "$DIR" if [ -n "$REV" ]; then git checkout "$REV" diff --git a/src/lib.rs b/src/lib.rs index b35668fb..6e290e12 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -118,6 +118,22 @@ pub enum Error { CannotCreateWhitelistSignature, /// The given whitelist signature doesn't correctly prove inclusion in the whitelist. InvalidWhitelistProof, + /// Cannot establish Musig pre-session + InvalidMusigPreSession, + /// Invalid tweak to Musig public key + InvalidMusigTweak, + /// Cannot establish a Musig session + InvalidMusigSession, + /// Invalid Musig public nonces + CannotGenMusigNonce, + /// Invalid Musig public nonce + InvalidMusigPubNonce, + /// Invalid Musig aggregated nonce + InvalidMusigAggNonce, + /// Invalid Musig partial signature + InvalidMusigPartSig, + /// Cannot extract Musig secret adaptor + InvalidMusigExtract, } // Passthrough Debug to Display, since errors should be user-visible @@ -145,6 +161,14 @@ impl fmt::Display for Error { Error::InvalidWhitelistProof => { "given whitelist signature doesn't correctly prove inclusion in the whitelist" } + Error::InvalidMusigPreSession => "failed to create Musig pre-session", + Error::InvalidMusigTweak => "malformed Musig tweak", + Error::InvalidMusigSession => "failed to create a Musig session", + Error::CannotGenMusigNonce => "failed to create a Musig nonce pair", + Error::InvalidMusigPubNonce => "malformed Musig public 
nonce(s)", + Error::InvalidMusigAggNonce => "malformed Musig aggregated nonce", + Error::InvalidMusigPartSig => "malformed Musig partial signature", + Error::InvalidMusigExtract => "failed to extract Musig secret adaptor", }; f.write_str(str) diff --git a/src/zkp/mod.rs index d16ad2aa..70c63249 100644 --- a/src/zkp/mod.rs +++ b/src/zkp/mod.rs @@ -1,6 +1,8 @@ mod ecdsa_adaptor; mod generator; #[cfg(feature = "std")] +mod musig; +#[cfg(feature = "std")] mod pedersen; #[cfg(feature = "std")] mod rangeproof; @@ -12,6 +14,8 @@ mod whitelist; pub use self::ecdsa_adaptor::*; pub use self::generator::*; #[cfg(feature = "std")] +pub use self::musig::*; +#[cfg(feature = "std")] pub use self::pedersen::*; #[cfg(feature = "std")] pub use self::rangeproof::*; diff --git a/src/zkp/musig.rs new file mode 100644 index 00000000..0db92867 --- /dev/null +++ b/src/zkp/musig.rs @@ -0,0 +1,1176 @@ +//! This module implements high-level Rust bindings for a Schnorr-based +//! multi-signature scheme called MuSig2 (https://eprint.iacr.org/2020/1261). +//! It is compatible with bip-schnorr. +//! +//! Documentation tests and some examples in [examples/musig.rs] show how the library can be used. +//! +//! The module also supports adaptor signatures as described in +//! https://github.com/ElementsProject/scriptless-scripts/pull/24 +//! +//! The documentation in this include file is for reference and may not be sufficient +//! for users to begin using the library. A full description of the C API usage can be found +//! in [C-musig.md](secp256k1-sys/depend/secp256k1/src/modules/musig/musig.md), and Rust API +//! usage can be found in [Rust-musig.md](USAGE.md). +use ffi::{self, CPtr}; +use schnorrsig; +use Error; +use Signing; +use {Message, PublicKey, Secp256k1, SecretKey}; + +/// Data structure containing auxiliary data generated in `pubkey_agg` and +/// required for `session_*_init`. 
+#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct MusigPreSession { + inner: ffi::MusigKeyaggCache, + agg_pk: schnorrsig::PublicKey, +} + +impl CPtr for MusigPreSession { + type Target = ffi::MusigKeyaggCache; + + fn as_c_ptr(&self) -> *const Self::Target { + self.as_ptr() + } + + fn as_mut_c_ptr(&mut self) -> *mut Self::Target { + self.as_mut_ptr() + } +} + +impl MusigPreSession { + /// Create a new MusigPreSession by supplying a list of PublicKeys used in the session + /// + /// Computes a combined public key and the hash of the given public keys. + /// + /// Different orders of `pubkeys` result in different `agg_pk`s. + /// + /// The pubkeys can be sorted before combining with `rustsecp256k1zkp_v0_4_0_xonly_sort` which + /// ensures the same resulting `agg_pk` for the same multiset of pubkeys. + /// This is useful to do before pubkey_combine, such that the order of pubkeys + /// does not affect the combined public key. + /// + /// Returns: MusigPreSession if the public keys were successfully combined, Error otherwise + /// Args: secp: Secp256k1 context object initialized for verification + /// Out: pre_session: `MusigPreSession` struct to be used in + /// `MusigPreSession::nonce_process` or `MusigPreSession::pubkey_tweak_add`. + /// `MusigPreSession` also contains the Musig-combined xonly public key + /// In: pubkeys: input array of public keys to combine. 
The order + /// is important; a different order will result in a different + /// combined public key + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{MusigPreSession, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key.clone()); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let _pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// ``` + pub fn new(secp: &Secp256k1, pubkeys: &[schnorrsig::PublicKey]) -> Result { + let cx = *secp.ctx(); + let xonly_ptrs = pubkeys + .iter() + .map(|k| k.as_ptr()) + .collect::>(); + let mut keyagg_cache = ffi::MusigKeyaggCache::new(); + + unsafe { + let mut agg_pk = schnorrsig::PublicKey::from(ffi::XOnlyPublicKey::new()); + if ffi::secp256k1_musig_pubkey_agg( + cx, + // FIXME: passing null pointer to ScratchSpace uses less efficient algorithm + // Need scratch_space_{create,destroy} exposed in public C API to safely handle + // memory + core::ptr::null_mut(), + agg_pk.as_mut_ptr(), + &mut keyagg_cache, + xonly_ptrs.as_ptr() as *const *const _, + xonly_ptrs.len(), + ) == 0 + { + Err(Error::InvalidMusigPreSession) + } else { + Ok(Self { + inner: keyagg_cache, + agg_pk, + }) + } + } + } + + /// Tweak an x-only public key by adding the generator multiplied with tweak32 + /// to it. The resulting output_pubkey with the given agg_pk and tweak + /// passes `rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_test`. + /// + /// This function is only useful before initializing a signing session. If you + /// are only computing a public key, but not intending to create a signature for + /// it, you can just use `rustsecp256k1zkp_v0_4_0_xonly_pubkey_tweak_add`. Can only be called + /// once with a given pre_session. 
+ /// + /// Returns: Error if the arguments are invalid or the resulting public key would be + /// invalid (only when the tweak is the negation of the corresponding + /// secret key). Tweaked PublicKey otherwise. + /// Args: secp: Secp256k1 context object initialized for verification + /// Out: output_pubkey: PublicKey with the result of the tweak + /// In: tweak32: const reference to a 32-byte tweak. If the tweak is invalid + /// according to rustsecp256k1zkp_v0_4_0_ec_seckey_verify, this function + /// returns Error. For uniformly random 32-byte arrays the + /// chance of being invalid is negligible (around 1 in + /// 2^128) + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{MusigPreSession, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key.clone()); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let mut pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let _pubkey = pre_session.pubkey_tweak_add(&secp, &[2; 32]).unwrap(); + /// ``` + pub fn pubkey_tweak_add( + &mut self, + secp: &Secp256k1, + tweak: &[u8; 32], + ) -> Result { + let cx = *secp.ctx(); + unsafe { + let mut out = PublicKey::from(ffi::PublicKey::new()); + if ffi::secp256k1_musig_pubkey_tweak_add( + cx, + out.as_mut_ptr(), + tweak.as_ptr(), + self.as_mut_ptr(), + ) == 0 + { + Err(Error::InvalidMusigTweak) + } else { + Ok(out) + } + } + } + + /// Starts a signing session by generating a nonce + /// + /// This function outputs a secret nonce that will be required for signing and a + /// corresponding public nonce that is intended to be sent to other signers. + /// + /// MuSig differs from regular Schnorr signing in that implementers _must_ take + /// special care to not reuse a nonce. This can be ensured by following these rules: + /// + /// 1. 
Always provide a unique session_id32. It is a "number used once". + /// 2. If you already know the signing key, message or aggregate public key + /// cache, they can be optionally provided to derive the nonce and increase + /// misuse-resistance. The extra_input32 argument can be used to provide + /// additional data that does not repeat in normal scenarios, such as the + /// current time. + /// 3. If you do not provide a seckey, session_id32 _must_ be UNIFORMLY RANDOM. + /// If you do provide a seckey, session_id32 can instead be a counter (that + /// must never repeat!). However, it is recommended to always choose + /// session_id32 uniformly at random. Note that using the same seckey for + /// multiple MuSig sessions is fine. + /// 4. Avoid copying (or serializing) the secnonce. This reduces the possibility + /// that it is used more than once for signing. + /// + /// Remember that nonce reuse will immediately leak the secret key! + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigPreSession, PublicKey, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let id = [2; 32]; + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let (_sec_nonce, _pub_nonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// ``` + pub fn nonce_gen( + &self, + secp: &Secp256k1, + session_id: &[u8; 32], + seckey: Option<&SecretKey>, + msg: &Message, + extra: Option<&[u8; 32]>, + ) -> Result<(MusigSecNonce, MusigPubNonce), Error> { + let cx = *secp.ctx(); + let extra_ptr = match extra { + Some(e) => e.as_ptr(), + None => core::ptr::null(), + }; + unsafe { + let mut sec_nonce = 
MusigSecNonce(ffi::MusigSecNonce::new()); + let mut pub_nonce = MusigPubNonce(ffi::MusigPubNonce::new()); + let sk_ptr = match seckey { + Some(s) => s.as_ptr(), + None => core::ptr::null(), + }; + if ffi::secp256k1_musig_nonce_gen( + cx, + sec_nonce.as_mut_ptr(), + pub_nonce.as_mut_ptr(), + session_id.as_ptr(), + sk_ptr, + msg.as_ptr(), + self.as_ptr(), + extra_ptr, + ) == 0 + { + Err(Error::CannotGenMusigNonce) + } else { + Ok((sec_nonce, pub_nonce)) + } + } + } + + /// Process MusigPreSession nonces to create a session cache and signature template + /// Takes the public nonces of all signers and computes a session cache that is + /// required for signing and verification of partial signatures and a signature + /// template that is required for combining partial signatures. + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, MusigSession, PublicKey, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let id = [1; 32]; + /// let (_secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// let _session = pre_session.nonce_process( + /// &secp, + /// &aggnonce, + /// &msg, + /// None, + /// ).unwrap(); + /// ``` + pub fn nonce_process( + &self, + secp: &Secp256k1, + aggnonce: &MusigAggNonce, + msg: &Message, + adaptor: Option<&PublicKey>, + ) -> Result { + let mut session = MusigSession(ffi::MusigSession::new()); + let adaptor_ptr = match adaptor { + Some(a) => a.as_ptr(), + None => core::ptr::null(), + }; + unsafe { + 
if ffi::secp256k1_musig_nonce_process( + *secp.ctx(), + session.as_mut_ptr(), + aggnonce.as_ptr(), + msg.as_ptr(), + self.as_ptr(), + adaptor_ptr, + ) == 0 + { + Err(Error::InvalidMusigPubNonce) + } else { + Ok(session) + } + } + } + + /// Get a const reference to the aggregated public key + pub fn agg_pk(&self) -> &schnorrsig::PublicKey { + &self.agg_pk + } + + /// Get a const pointer to the inner MusigPreSession + pub fn as_ptr(&self) -> *const ffi::MusigKeyaggCache { + &self.inner + } + + /// Get a mut pointer to the inner MusigPreSession + pub fn as_mut_ptr(&mut self) -> *mut ffi::MusigKeyaggCache { + &mut self.inner + } +} + +/// Opaque data structure that holds a partial MuSig signature. +/// +/// Guaranteed to be 32 bytes in size. Serialized and parsed with +/// [MusigPartialSignature::serialize] and [MusigPartialSignature::parse]. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct MusigPartialSignature(ffi::MusigPartialSignature); + +impl CPtr for MusigPartialSignature { + type Target = ffi::MusigPartialSignature; + + fn as_c_ptr(&self) -> *const Self::Target { + self.as_ptr() + } + + fn as_mut_c_ptr(&mut self) -> *mut Self::Target { + self.as_mut_ptr() + } +} + +impl MusigPartialSignature { + /// Serialize a MuSigPartialSignature or adaptor signature + /// + /// Returns: 32-byte array when the signature could be serialized, Error otherwise + /// Args: ctx: a Secp256k1 context object + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, MusigSession, PublicKey, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 
32]).unwrap(); + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// + /// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// let session = pre_session.nonce_process( + /// &secp, + /// &aggnonce, + /// &msg, + /// None, + /// ).unwrap(); + /// + /// let partial_sig = session.partial_sign( + /// &secp, + /// &mut secnonce, + /// &keypair, + /// &pre_session, + /// ).unwrap(); + /// + /// let _ser_sig = partial_sig.serialize(&secp).unwrap(); + /// ``` + pub fn serialize(&self, secp: &Secp256k1) -> Result<[u8; 32], Error> { + let mut data = [0; 32]; + unsafe { + if ffi::secp256k1_musig_partial_sig_serialize( + *secp.ctx(), + data.as_mut_ptr(), + self.as_ptr(), + ) == 0 + { + Err(Error::InvalidMusigPartSig) + } else { + Ok(data) + } + } + } + + /// Deserialize a MusigPartialSignature from a portable byte representation + /// Parse and verify a MuSig partial signature. + /// + /// After the call, sig will always be initialized. If parsing failed or the + /// encoded numbers are out of range, signature verification with it is + /// guaranteed to fail for every message and public key. 
+ /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{ + /// # Message, MusigAggNonce, MusigPartialSignature, MusigPreSession, MusigSession, Secp256k1, SecretKey, + /// # }; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// + /// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// let session = pre_session.nonce_process( + /// &secp, + /// &aggnonce, + /// &msg, + /// None, + /// ).unwrap(); + /// + /// let partial_sig = session.partial_sign( + /// &secp, + /// &mut secnonce, + /// &keypair, + /// &pre_session, + /// ).unwrap(); + /// + /// let ser_sig = partial_sig.serialize(&secp).unwrap(); + /// let _parsed_sig = MusigPartialSignature::parse(&secp, &ser_sig).unwrap(); + /// ``` + pub fn parse(secp: &Secp256k1, data: &[u8]) -> Result { + let mut part_sig = MusigPartialSignature(ffi::MusigPartialSignature::new()); + if data.len() != 32 { + return Err(Error::InvalidMusigPartSig); + } + unsafe { + if ffi::secp256k1_musig_partial_sig_parse( + *secp.ctx(), + part_sig.as_mut_ptr(), + data.as_ptr(), + ) == 0 + { + Err(Error::InvalidMusigPartSig) + } else { + Ok(part_sig) + } + } + } + + /// Get a const pointer to the inner MusigPartialSignature + pub fn as_ptr(&self) -> *const ffi::MusigPartialSignature { + &self.0 + } + + /// Get a mut pointer to the inner MusigPartialSignature + pub fn as_mut_ptr(&mut self) -> *mut ffi::MusigPartialSignature { + &mut self.0 + } +} + +/// Converts a partial signature to an adaptor signature by adding a given 
secret adaptor. +/// +/// Example: +/// +/// ```rust +/// # use secp256k1_zkp::{adapt, Message, MusigAggNonce, MusigPreSession, MusigSession, PublicKey, Secp256k1, SecretKey}; +/// # use secp256k1_zkp::schnorrsig; +/// let secp = Secp256k1::new(); +/// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); +/// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); +/// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); +/// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); +/// let msg = Message::from_slice(&[3; 32]).unwrap(); +/// let id = [2; 32]; +/// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); +/// +/// let adapt_bytes = [2; 32]; +/// let adapt_sec = SecretKey::from_slice(&adapt_bytes).unwrap(); +/// let adapt_pub = PublicKey::from_secret_key(&secp, &adapt_sec); +/// +/// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); +/// let mut session = pre_session.nonce_process( +/// &secp, +/// &aggnonce, +/// &msg, +/// Some(&adapt_pub), +/// ).unwrap(); +/// +/// let partial_sig = session.partial_sign( +/// &secp, +/// &mut secnonce, +/// &keypair, +/// &pre_session, +/// ).unwrap(); +/// let nonce_parity = session.nonce_parity(&secp).unwrap(); +/// let pre_sig = session.partial_sig_agg(&secp, &[partial_sig]).unwrap(); +/// +/// let _adaptor_sig = adapt(&secp, &pre_sig, &adapt_sec, nonce_parity).unwrap(); +/// ``` +pub fn adapt( + secp: &Secp256k1, + pre_sig: &schnorrsig::Signature, + sec_adaptor: &SecretKey, + nonce_parity: i32, +) -> Result { + unsafe { + let mut sig = pre_sig.clone(); + if ffi::secp256k1_musig_adapt( + *secp.ctx(), + sig.as_mut_ptr(), + sec_adaptor.as_ptr(), + nonce_parity, + ) == 0 + { + Err(Error::InvalidMusigPartSig) + } else { + Ok(schnorrsig::Signature::from_slice(sig.as_ref())?) + } + } +} + +/// Extracts a secret adaptor from a MuSig, given all parties' partial +/// signatures. 
This function will not fail unless given grossly invalid data; if it +/// is merely given signatures that do not verify, the returned value will be +/// nonsense. It is therefore important that all data be verified at earlier steps of +/// any protocol that uses this function. +/// +/// Example: +/// +/// ```rust +/// # use secp256k1_zkp::{adapt, extract_adaptor}; +/// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, MusigSession, PublicKey, Secp256k1, SecretKey}; +/// # use secp256k1_zkp::schnorrsig; +/// let secp = Secp256k1::new(); +/// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); +/// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); +/// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); +/// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); +/// let msg = Message::from_slice(&[3; 32]).unwrap(); +/// +/// let adapt_bytes = [2; 32]; +/// let adapt_sec = SecretKey::from_slice(&adapt_bytes).unwrap(); +/// let adapt_pub = PublicKey::from_secret_key(&secp, &adapt_sec); +/// +/// let id = [2; 32]; +/// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); +/// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); +/// +/// let mut session = pre_session.nonce_process( +/// &secp, +/// &aggnonce, +/// &msg, +/// Some(&adapt_pub), +/// ).unwrap(); +/// +/// let partial_sig = session.partial_sign( +/// &secp, +/// &mut secnonce, +/// &keypair, +/// &pre_session, +/// ).unwrap(); +/// +/// let nonce_parity = session.nonce_parity(&secp).unwrap(); +/// let pre_sig = session.partial_sig_agg(&secp, &[partial_sig]).unwrap(); +/// let adaptor_sig = adapt(&secp, &pre_sig, &adapt_sec, nonce_parity).unwrap(); +/// let extracted_sec = extract_adaptor( +/// &secp, +/// &adaptor_sig, +/// &pre_sig, +/// nonce_parity, +/// ).unwrap(); +/// assert_eq!(extracted_sec, adapt_sec); +/// ``` +pub fn extract_adaptor( + secp: &Secp256k1, + sig: 
&schnorrsig::Signature, + pre_sig: &schnorrsig::Signature, + nonce_parity: i32, +) -> Result { + unsafe { + let mut secret = SecretKey::from_slice([1; 32].as_ref())?; + if ffi::secp256k1_musig_extract_adaptor( + *secp.ctx(), + secret.as_mut_ptr(), + sig.as_ptr(), + pre_sig.as_ptr(), + nonce_parity, + ) == 0 + { + Err(Error::InvalidMusigExtract) + } else { + Ok(secret) + } + } +} + +/// Guaranteed to be 64 bytes in size. This structure MUST NOT be copied or +/// read or written to it directly. A signer who is online throughout the whole +/// process and can keep this structure in memory can use the provided API +/// functions for a safe standard workflow. See +/// https://blockstream.com/2019/02/18/musig-a-new-multisignature-standard/ for +/// more details about the risks associated with serializing or deserializing +/// this structure. There are no serialization and parsing functions (yet). +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct MusigSecNonce(ffi::MusigSecNonce); + +impl CPtr for MusigSecNonce { + type Target = ffi::MusigSecNonce; + + fn as_c_ptr(&self) -> *const Self::Target { + self.as_ptr() + } + + fn as_mut_c_ptr(&mut self) -> *mut Self::Target { + self.as_mut_ptr() + } +} + +impl MusigSecNonce { + /// Get a const pointer to the inner MusigPreSession + pub fn as_ptr(&self) -> *const ffi::MusigSecNonce { + &self.0 + } + + /// Get a mut pointer to the inner MusigPreSession + pub fn as_mut_ptr(&mut self) -> *mut ffi::MusigSecNonce { + &mut self.0 + } +} + +/// Opaque data structure that holds a MuSig public nonce. +/// +/// Guaranteed to be 66 bytes in size. There are no serialization and parsing functions (yet). 
+#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct MusigPubNonce(ffi::MusigPubNonce); + +impl CPtr for MusigPubNonce { + type Target = ffi::MusigPubNonce; + + fn as_c_ptr(&self) -> *const Self::Target { + self.as_ptr() + } + + fn as_mut_c_ptr(&mut self) -> *mut Self::Target { + self.as_mut_ptr() + } +} + +impl MusigPubNonce { + /// Serialize a MusigPubNonce + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigPreSession, MusigPubNonce, PublicKey, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// + /// let _pubnonce_ser = pubnonce.serialize(&secp).unwrap(); + /// ``` + pub fn serialize(&self, secp: &Secp256k1) -> Result<[u8; ffi::MUSIG_PUBNONCE_LEN], Error> { + let mut data = [0; ffi::MUSIG_PUBNONCE_LEN]; + unsafe { + if ffi::secp256k1_musig_pubnonce_serialize( + *secp.ctx(), + data.as_mut_ptr(), + self.as_ptr(), + ) == 0 + { + Err(Error::InvalidMusigPubNonce) + } else { + Ok(data) + } + } + } + + /// Deserialize a MusigPubNonce from a portable byte representation + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigPreSession, MusigPubNonce, PublicKey, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = 
MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// + /// let pubnonce_ser = pubnonce.serialize(&secp).unwrap(); + /// let parsed_pubnonce = MusigPubNonce::parse(&secp, &pubnonce_ser).unwrap(); + /// assert_eq!(parsed_pubnonce, pubnonce); + /// ``` + pub fn parse(secp: &Secp256k1, data: &[u8]) -> Result { + let mut pubnonce = MusigPubNonce(ffi::MusigPubNonce::new()); + if data.len() != ffi::MUSIG_PUBNONCE_LEN { + return Err(Error::InvalidMusigPartSig); + } + unsafe { + if ffi::secp256k1_musig_pubnonce_parse( + *secp.ctx(), + pubnonce.as_mut_ptr(), + data.as_ptr(), + ) == 0 + { + Err(Error::InvalidMusigPubNonce) + } else { + Ok(pubnonce) + } + } + } + + /// Get a const pointer to the inner MusigPubNonce + pub fn as_ptr(&self) -> *const ffi::MusigPubNonce { + &self.0 + } + + /// Get a mut pointer to the inner MusigPubNonce + pub fn as_mut_ptr(&mut self) -> *mut ffi::MusigPubNonce { + &mut self.0 + } +} + +/// Opaque data structure that holds a MuSig aggregated nonce. +/// +/// Guaranteed to be 66 bytes in size. There are no serialization and parsing functions (yet). +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct MusigAggNonce(ffi::MusigAggNonce); + +impl CPtr for MusigAggNonce { + type Target = ffi::MusigAggNonce; + + fn as_c_ptr(&self) -> *const Self::Target { + self.as_ptr() + } + + fn as_mut_c_ptr(&mut self) -> *mut Self::Target { + self.as_mut_ptr() + } +} + +impl MusigAggNonce { + /// Combine received public nonces into a single aggregated nonce + /// + /// This is useful to reduce the communication between signers, because instead + /// of everyone sending nonces to everyone else, there can be one party + /// receiving all nonces, combining the nonces with this function and then + /// sending only the combined nonce back to the signers. 
The pubnonces argument + /// of [MusigPreSession::nonce_process] then simply becomes an array whose sole + /// element is this combined nonce. + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, MusigPubNonce, MusigSession, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// let _aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// ``` + pub fn new(secp: &Secp256k1, nonces: &[MusigPubNonce]) -> Result { + let mut aggnonce = Self(ffi::MusigAggNonce::new()); + let nonce_ptrs = nonces.iter().map(|n| n.as_ptr()).collect::>(); + unsafe { + if ffi::secp256k1_musig_nonce_agg( + *secp.ctx(), + aggnonce.as_mut_ptr(), + nonce_ptrs.as_ptr(), + nonce_ptrs.len(), + ) == 0 + { + Err(Error::InvalidMusigPubNonce) + } else { + Ok(aggnonce) + } + } + } + + /// Serialize a MusigAggNonce + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = 
pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// + /// let _aggnonce_ser = aggnonce.serialize(&secp).unwrap(); + /// ``` + pub fn serialize(&self, secp: &Secp256k1) -> Result<[u8; ffi::MUSIG_AGGNONCE_LEN], Error> { + let mut data = [0; ffi::MUSIG_AGGNONCE_LEN]; + unsafe { + if ffi::secp256k1_musig_aggnonce_serialize( + *secp.ctx(), + data.as_mut_ptr(), + self.as_ptr(), + ) == 0 + { + Err(Error::InvalidMusigAggNonce) + } else { + Ok(data) + } + } + } + + /// Deserialize a MusigAggNonce from a portable byte representation + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// + /// let aggnonce_ser = aggnonce.serialize(&secp).unwrap(); + /// let parsed_aggnonce = MusigAggNonce::parse(&secp, &aggnonce_ser).unwrap(); + /// assert_eq!(parsed_aggnonce, aggnonce); + /// ``` + pub fn parse(secp: &Secp256k1, data: &[u8]) -> Result { + if data.len() != ffi::MUSIG_AGGNONCE_LEN { + return Err(Error::InvalidMusigPartSig); + } + let mut aggnonce = Self(ffi::MusigAggNonce::new()); + unsafe { + if ffi::secp256k1_musig_aggnonce_parse( + *secp.ctx(), + aggnonce.as_mut_ptr(), + data.as_ptr(), + ) == 0 + { + Err(Error::InvalidMusigAggNonce) + } else { + Ok(aggnonce) + } + } + } + + /// Get a const pointer to the 
inner MusigAggNonce + pub fn as_ptr(&self) -> *const ffi::MusigAggNonce { + &self.0 + } + + /// Get a mut pointer to the inner MusigAggNonce + pub fn as_mut_ptr(&mut self) -> *mut ffi::MusigAggNonce { + &mut self.0 + } +} + +/// Musig session data structure containing the +/// secret and public nonce used in a multi-signature signing session +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub struct MusigSession(ffi::MusigSession); + +impl CPtr for MusigSession { + type Target = ffi::MusigSession; + + fn as_c_ptr(&self) -> *const Self::Target { + self.as_ptr() + } + + fn as_mut_c_ptr(&mut self) -> *mut Self::Target { + self.as_mut_ptr() + } +} + +impl MusigSession { + /// Produces a partial signature + /// + /// This function sets the given secnonce to 0 and will abort if given a + /// secnonce that is 0. This is a best effort attempt to protect against nonce + /// reuse. However, this is of course easily defeated if the secnonce has been + /// copied (or serialized). + /// + /// Remember that nonce reuse will immediately leak the secret key! 
+ /// + /// Returns: Error if the arguments are invalid or the provided secnonce has already + /// been used for signing, MusigPartialSignature otherwise + /// Args: ctx: pointer to a context object (cannot be NULL) + /// In/Out: secnonce: MusigSecNonce struct created in [MusigSession::new] + /// In: keypair: Keypair to sign the message with + /// session_cache: MusigSessionCache that was created with [MusigPartialSig::nonce_process] + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, MusigSession, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// let session = pre_session.nonce_process( + /// &secp, + /// &aggnonce, + /// &msg, + /// None, + /// ).unwrap(); + /// + /// let _partial_sig = session.partial_sign( + /// &secp, + /// &mut secnonce, + /// &keypair, + /// &pre_session, + /// ).unwrap(); + /// ``` + pub fn partial_sign( + &self, + secp: &Secp256k1, + secnonce: &mut MusigSecNonce, + keypair: &schnorrsig::KeyPair, + pre_session: &MusigPreSession, + ) -> Result { + unsafe { + let mut partial_sig = MusigPartialSignature(ffi::MusigPartialSignature::new()); + if ffi::secp256k1_musig_partial_sign( + *secp.ctx(), + partial_sig.as_mut_ptr(), + secnonce.as_mut_ptr(), + keypair.as_ptr(), + pre_session.as_ptr(), + self.as_ptr(), + ) == 0 + { + Err(Error::InvalidMusigPartSig) + } else { + Ok(partial_sig) + } + } + } + + /// 
Checks that an individual partial signature verifies + /// + /// This function is essential when using protocols with adaptor signatures. + /// However, it is not essential for regular MuSig's, in the sense that if any + /// partial signatures does not verify, the full signature will also not verify, so the + /// problem will be caught. But this function allows determining the specific party + /// who produced an invalid signature, so that signing can be restarted without them. + /// + /// Returns: false if the arguments are invalid or the partial signature does not + /// verify, true otherwise + /// Args secp: Secp256k1 context object, initialized for verification + /// In: pubnonce: the 66-byte pubnonce sent by the signer who produced + /// the signature + /// pubkey: public key of the signer who produced the signature + /// pre_session: MusigPreSession that was output when the + /// combined public key for this session + /// session_cache: MusigSessionCache that was created with + /// [MusigPartialSig::nonce_process] + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, MusigSession, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let mut pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// + /// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// let session = pre_session.nonce_process( + /// &secp, + /// &aggnonce, + /// &msg, + /// None, + /// ).unwrap(); + /// + /// let partial_sig = session.partial_sign( + /// &secp, + /// 
&mut secnonce, + /// &keypair, + /// &pre_session, + /// ).unwrap(); + /// + /// assert!(session.partial_verify( + /// &secp, + /// &partial_sig, + /// &pubnonce, + /// &pub_key, + /// &pre_session, + /// )); + /// ``` + pub fn partial_verify( + &self, + secp: &Secp256k1, + partial_sig: &MusigPartialSignature, + pubnonce: &MusigPubNonce, + pubkey: &schnorrsig::PublicKey, + pre_session: &MusigPreSession, + ) -> bool { + let cx = *secp.ctx(); + unsafe { + ffi::secp256k1_musig_partial_sig_verify( + cx, + partial_sig.as_ptr(), + pubnonce.as_ptr(), + pubkey.as_ptr(), + pre_session.as_ptr(), + self.as_ptr(), + ) == 1 + } + } + + /// Aggregate partial signatures + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, MusigSession, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// + /// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// let session = pre_session.nonce_process( + /// &secp, + /// &aggnonce, + /// &msg, + /// None, + /// ).unwrap(); + /// + /// let partial_sig = session.partial_sign( + /// &secp, + /// &mut secnonce, + /// &keypair, + /// &pre_session, + /// ).unwrap(); + /// let _sig = session.partial_sig_agg(&secp, &[partial_sig]).unwrap(); + /// ``` + pub fn partial_sig_agg( + &self, + secp: &Secp256k1, + partial_sigs: &[MusigPartialSignature], + ) -> Result { + let part_sigs = partial_sigs.iter().map(|s| s.as_ptr()).collect::>(); + let mut sig = [0u8; 64]; + unsafe { + 
if ffi::secp256k1_musig_partial_sig_agg( + *secp.ctx(), + sig.as_mut_ptr(), + self.as_ptr(), + part_sigs.as_ptr(), + part_sigs.len(), + ) == 0 + { + Err(Error::InvalidMusigPartSig) + } else { + Ok(schnorrsig::Signature::from_slice(&sig)?) + } + } + } + + /// Extracts the nonce_parity bit from a session + /// + /// This is used for adaptor signatures + /// + /// Example: + /// + /// ```rust + /// # use secp256k1_zkp::{Message, MusigAggNonce, MusigPreSession, MusigSession, Secp256k1, SecretKey}; + /// # use secp256k1_zkp::schnorrsig; + /// let secp = Secp256k1::new(); + /// let sec_key = SecretKey::from_slice([1; 32].as_ref()).unwrap(); + /// let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + /// let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + /// let pre_session = MusigPreSession::new(&secp, &[pub_key]).unwrap(); + /// let msg = Message::from_slice(&[3; 32]).unwrap(); + /// let id = [2; 32]; + /// let (mut secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + /// + /// let aggnonce = MusigAggNonce::new(&secp, &[pubnonce]).unwrap(); + /// let mut session = pre_session.nonce_process( + /// &secp, + /// &aggnonce, + /// &msg, + /// None, + /// ).unwrap(); + /// + /// let _parity = session.nonce_parity(&secp).unwrap(); + /// ``` + pub fn nonce_parity(&mut self, secp: &Secp256k1) -> Result { + let mut ret = 0i32; + let cx = *secp.ctx(); + unsafe { + if ffi::secp256k1_musig_nonce_parity(cx, &mut ret, self.as_mut_ptr()) == 0 { + Err(Error::InvalidMusigSession) + } else { + Ok(ret) + } + } + } + + /// Get a const pointer to the inner MusigSession + pub fn as_ptr(&self) -> *const ffi::MusigSession { + &self.0 + } + + /// Get a mut pointer to the inner MusigSession + pub fn as_mut_ptr(&mut self) -> *mut ffi::MusigSession { + &mut self.0 + } +} + +#[cfg(test)] +mod tests { + use super::*; + use rand::{thread_rng, RngCore}; + + #[test] + fn test_pre_session() { + let secp = Secp256k1::new(); + let mut 
sec_bytes = [0; 32]; + thread_rng().fill_bytes(&mut sec_bytes); + let sec_key = SecretKey::from_slice(&sec_bytes).unwrap(); + let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + + let _pre_session = MusigPreSession::new(&secp, &[pub_key, pub_key]).unwrap(); + } + + #[test] + fn test_nonce_parsing() { + let secp = Secp256k1::new(); + let mut sec_bytes = [0; 32]; + thread_rng().fill_bytes(&mut sec_bytes); + let sec_key = SecretKey::from_slice(&sec_bytes).unwrap(); + let keypair = schnorrsig::KeyPair::from_secret_key(&secp, sec_key); + let pub_key = schnorrsig::PublicKey::from_keypair(&secp, &keypair); + + let pre_session = MusigPreSession::new(&secp, &[pub_key, pub_key]).unwrap(); + let msg = Message::from_slice(&[3; 32]).unwrap(); + let id = [2; 32]; + let (_secnonce, pubnonce) = pre_session.nonce_gen(&secp, &id, None, &msg, None).unwrap(); + + let pubnonce_ser = pubnonce.serialize(&secp).unwrap(); + let parsed_pubnonce = MusigPubNonce::parse(&secp, &pubnonce_ser).unwrap(); + + assert_eq!(parsed_pubnonce, pubnonce); + } +}