@@ -488,6 +488,13 @@ static int secp256k1_bulletproof_rangeproof_prove_impl(const secp256k1_ecmult_co
488
488
489
489
secp256k1_scalar_chacha20 (& alpha , & rho , nonce , 0 );
490
490
secp256k1_scalar_chacha20 (& tau1 , & tau2 , nonce , 1 );
491
+ /* Encrypt value into alpha, so it will be recoverable from -mu by someone who knows `nonce` */
492
+ if (n_commits == 1 ) {
493
+ secp256k1_scalar vals ;
494
+ secp256k1_scalar_set_u64 (& vals , value [0 ]);
495
+ secp256k1_scalar_negate (& vals , & vals ); /* Negate so it'll be positive in -mu */
496
+ secp256k1_scalar_add (& alpha , & alpha , & vals );
497
+ }
491
498
492
499
/* Compute A and S */
493
500
secp256k1_ecmult_const (& aj , & gens -> blinding_gen [0 ], & alpha , 256 );
@@ -645,4 +652,141 @@ static int secp256k1_bulletproof_rangeproof_prove_impl(const secp256k1_ecmult_co
645
652
646
653
return 1 ;
647
654
}
655
+
656
/* Attempt to "rewind" a single-commitment Bulletproof rangeproof: given the
 * same `nonce` the prover used, re-derive the prover's secret scalars and
 * recover the committed value and blinding factor from the proof.
 *
 * The prover embeds the (negated) value into alpha before computing mu, so
 * someone who knows `nonce` can extract it from the proof's mu field.
 *
 * Returns 1 on success, with *value and *blind set and verified against
 * `pcommit`; returns 0 if the proof is malformed, the nonce is wrong, or the
 * recovered (value, blind) pair does not reproduce the commitment. */
static int secp256k1_bulletproof_rangeproof_rewind_impl(uint64_t *value, secp256k1_scalar *blind, const unsigned char *proof, const size_t plen, uint64_t min_value, const secp256k1_pedersen_commitment *pcommit, const secp256k1_generator *value_gen, const secp256k1_ge *blind_gen, const unsigned char *nonce, const unsigned char *extra_commit, size_t extra_commit_len) {
    secp256k1_sha256 sha256;
    static const unsigned char zero24[24] = { 0 };
    unsigned char commit[32] = { 0 };    /* running Fiat-Shamir transcript hash */
    unsigned char lrparity;
    secp256k1_scalar taux, mu;
    secp256k1_scalar alpha, rho, tau1, tau2;
    secp256k1_scalar x, z;
    secp256k1_ge commitp, value_genp;
    secp256k1_gej rewind_commitj;
    int overflow;

    /* Minimum proof: 64 bytes of scalars (taux||mu) + 1 parity byte + 128
     * bytes of x-coordinates (A,S and T1,T2) — anything shorter cannot even
     * contain the fields read below. */
    if (plen < 64 + 128 + 1 || plen > SECP256K1_BULLETPROOF_MAX_PROOF) {
        return 0;
    }

    /* Extract data from beginning of proof: taux at offset 0, mu at offset 32.
     * Reject out-of-range or zero scalars. */
    secp256k1_scalar_set_b32(&taux, &proof[0], &overflow);
    if (overflow || secp256k1_scalar_is_zero(&taux)) {
        return 0;
    }
    secp256k1_scalar_set_b32(&mu, &proof[32], &overflow);
    if (overflow || secp256k1_scalar_is_zero(&mu)) {
        return 0;
    }

    /* Re-derive the prover's secret nonces (alpha, rho, tau1, tau2) exactly as
     * the proving side does from the shared `nonce`. */
    secp256k1_scalar_chacha20(&alpha, &rho, nonce, 0);
    secp256k1_scalar_chacha20(&tau1, &tau2, nonce, 1);

    /* Fold min_value (serialized little-endian) into the transcript, matching
     * the verifier/prover transcript when a nonzero minimum is committed. */
    if (min_value > 0) {
        unsigned char vbuf[8];
        vbuf[0] = min_value;
        vbuf[1] = min_value >> 8;
        vbuf[2] = min_value >> 16;
        vbuf[3] = min_value >> 24;
        vbuf[4] = min_value >> 32;
        vbuf[5] = min_value >> 40;
        vbuf[6] = min_value >> 48;
        vbuf[7] = min_value >> 56;
        secp256k1_sha256_initialize(&sha256);
        secp256k1_sha256_write(&sha256, commit, 32);
        secp256k1_sha256_write(&sha256, vbuf, 8);
        secp256k1_sha256_finalize(&sha256, commit);
    }

    /* This breaks the abstraction of both the Pedersen commitment and the generator
     * type by directly reading the parity bit and x-coordinate from the data. But
     * the alternative using the _load functions is to do two full point decompression,
     * and in my benchmarks we save ~80% of the rewinding time by avoiding this. -asp */
    lrparity = 2 * !!(pcommit->data[0] & 1) + !!(value_gen->data[0] & 1);
    secp256k1_sha256_initialize(&sha256);
    secp256k1_sha256_write(&sha256, commit, 32);
    secp256k1_sha256_write(&sha256, &lrparity, 1);
    secp256k1_sha256_write(&sha256, &pcommit->data[1], 32);
    secp256k1_sha256_write(&sha256, &value_gen->data[1], 32);
    secp256k1_sha256_finalize(&sha256, commit);

    /* Optional caller-supplied data is committed next, as in proving/verifying. */
    if (extra_commit != NULL) {
        secp256k1_sha256_initialize(&sha256);
        secp256k1_sha256_write(&sha256, commit, 32);
        secp256k1_sha256_write(&sha256, extra_commit, extra_commit_len);
        secp256k1_sha256_finalize(&sha256, commit);
    }

    /* Extract A and S to compute y and z. proof[64] packs the parity bits of
     * A and S (bits 0-1) and of T1 and T2 (bits 2-3); proof[65..128] holds the
     * two 32-byte x-coordinates hashed below. */
    lrparity = 2 * !!(proof[64] & 1) + !!(proof[64] & 2);
    /* y -- the y challenge itself is not needed for rewinding; this hash only
     * advances the transcript so that z comes out the same as the verifier's. */
    secp256k1_sha256_initialize(&sha256);
    secp256k1_sha256_write(&sha256, commit, 32);
    secp256k1_sha256_write(&sha256, &lrparity, 1);
    secp256k1_sha256_write(&sha256, &proof[65], 64);
    secp256k1_sha256_finalize(&sha256, commit);

    /* z */
    secp256k1_sha256_initialize(&sha256);
    secp256k1_sha256_write(&sha256, commit, 32);
    secp256k1_sha256_write(&sha256, &lrparity, 1);
    secp256k1_sha256_write(&sha256, &proof[65], 64);
    secp256k1_sha256_finalize(&sha256, commit);

    secp256k1_scalar_set_b32(&z, commit, &overflow);
    if (overflow || secp256k1_scalar_is_zero(&z)) {
        return 0;
    }

    /* x -- derived from T1, T2 (parity bits 2-3 of proof[64], x-coords at
     * proof[129..192]). */
    lrparity = 2 * !!(proof[64] & 4) + !!(proof[64] & 8);
    secp256k1_sha256_initialize(&sha256);
    secp256k1_sha256_write(&sha256, commit, 32);
    secp256k1_sha256_write(&sha256, &lrparity, 1);
    secp256k1_sha256_write(&sha256, &proof[129], 64);
    secp256k1_sha256_finalize(&sha256, commit);

    secp256k1_scalar_set_b32(&x, commit, &overflow);
    if (overflow || secp256k1_scalar_is_zero(&x)) {
        return 0;
    }

    /* Compute candidate mu and add to (negated) mu from proof to get value.
     * The prover stores a mu such that (proof mu) + rho*x + alpha cancels the
     * nonce-derived terms and leaves only the embedded value -- presumably the
     * serialized mu is negated relative to alpha + rho*x; with a wrong nonce
     * the top bytes below will not be zero and we bail out. */
    secp256k1_scalar_mul(&rho, &rho, &x);
    secp256k1_scalar_add(&mu, &mu, &rho);
    secp256k1_scalar_add(&mu, &mu, &alpha);

    secp256k1_scalar_get_b32(commit, &mu);
    /* A genuine rewind leaves a scalar that fits in 64 bits: the 24 high-order
     * bytes of the big-endian encoding must all be zero. */
    if (memcmp(commit, zero24, 24) != 0) {
        return 0;
    }
    /* Decode the value from the low 8 bytes (big-endian scalar encoding). */
    *value = commit[31] + ((uint64_t) commit[30] << 8) +
             ((uint64_t) commit[29] << 16) + ((uint64_t) commit[28] << 24) +
             ((uint64_t) commit[27] << 32) + ((uint64_t) commit[26] << 40) +
             ((uint64_t) commit[25] << 48) + ((uint64_t) commit[24] << 56);

    /* Derive blinding factor: blind = -(taux + tau1*x + tau2*x^2) / z^2 */
    secp256k1_scalar_mul(&tau1, &tau1, &x);
    secp256k1_scalar_mul(&tau2, &tau2, &x);
    secp256k1_scalar_mul(&tau2, &tau2, &x);

    secp256k1_scalar_add(&taux, &taux, &tau1);
    secp256k1_scalar_add(&taux, &taux, &tau2);

    secp256k1_scalar_sqr(&z, &z);
    secp256k1_scalar_inverse_var(&z, &z);
    secp256k1_scalar_mul(blind, &taux, &z);
    secp256k1_scalar_negate(blind, blind);

    /* Check blinding factor: recompute the Pedersen commitment from the
     * recovered (value, blind) pair and require it to equal `pcommit`
     * (difference is the point at infinity). */
    secp256k1_pedersen_commitment_load(&commitp, pcommit);
    secp256k1_generator_load(&value_genp, value_gen);

    secp256k1_pedersen_ecmult(&rewind_commitj, blind, *value, &value_genp, blind_gen);
    secp256k1_gej_neg(&rewind_commitj, &rewind_commitj);
    secp256k1_gej_add_ge_var(&rewind_commitj, &rewind_commitj, &commitp, NULL);

    return secp256k1_gej_is_infinity(&rewind_commitj);
}
791
+
648
792
#endif
0 commit comments