@@ -109,6 +109,105 @@ static int secp256k1_rangeproof_header_parse(
109
109
return secp256k1_rangeproof_header_expand (header );
110
110
}
111
111
112
/* Populate `header` and derive the blinded value `*proven_value` from the
 * caller-supplied proof parameters. Returns 0 if the parameters are
 * inconsistent (min_value > value, min_bits > 64, or exp outside [-1, 18]);
 * otherwise returns the result of secp256k1_rangeproof_header_expand.
 * NOTE(review): the derived fields (scale, rsizes, n_rings, n_pubs) are
 * computed by secp256k1_rangeproof_header_expand, which is defined outside
 * this view — the correctness of this function depends on it. */
static int secp256k1_rangeproof_header_set_for_value(
    secp256k1_rangeproof_header* header,
    uint64_t* proven_value,
    const uint64_t min_value,
    const uint64_t min_bits,
    const int exp,
    const uint64_t value
) {
    memset(header, 0, sizeof(*header));
    *proven_value = 0;

    /* Sanity checks */
    if (min_value > value || min_bits > 64 || exp < -1 || exp > 18) {
        return 0;
    }

    /* Start by just using the user's requested values, then adjust them in
     * various ways to make them compatible. This is probably not advisable
     * from a privacy point-of-view but it's important to be compatible with
     * the 2015-era API, and all of these issues will go away when we merge
     * Bulletproofs. */
    header->exp = exp;
    header->min_value = min_value;
    header->mantissa = min_bits ? min_bits : 1; /* force mantissa to be nonzero */

    /* Special-case single-value proofs: exp == -1 encodes "prove an exact
     * value"; no mantissa bits are needed. */
    if (header->exp == -1) {
        header->mantissa = 0; /* ignore user's min_bits */
        return secp256k1_rangeproof_header_expand(header);
    }

    /* Deal with extreme values (copied directly from 2015 code) */
    if (min_bits > 61 || value > INT64_MAX) {
        /* Ten is not a power of two, so dividing by ten and then representing in base-2 times ten
         * expands the representable range. The verifier requires the proven range is within 0..2**64.
         * For very large numbers (all over 2**63) we must change our exponent to compensate.
         * Rather than handling it precisely, this just disables use of the exponent for big values.
         */
        header->exp = 0;
    }
    {
        /* If the user has asked for more bits of proof than there is room for in the exponent, reduce the exponent. */
        uint64_t max = min_bits ? (UINT64_MAX >> (64 - min_bits)) : 0;
        int i;
        for (i = 0; i < header->exp && max <= UINT64_MAX / 10; i++) {
            max *= 10;
        }
        header->exp = i;
    }

    /* Increase the mantissa from min_bits until it actually covers the proven value */
    if (!secp256k1_rangeproof_header_expand(header)) {
        return 0;
    }
    /* NOTE(review): header->scale is presumably 10^header->exp, filled in by
     * header_expand above — confirm against its definition. */
    *proven_value = (value - header->min_value) / header->scale;
    while (header->mantissa < 64 && (*proven_value >> header->mantissa) > 0) {
        header->mantissa++;
    }
    /* Fudge min_value so we don't lose the low-order digits of `value` */
    header->min_value = value - (*proven_value * header->scale);

    /* Increasing the mantissa will have increased the number of rings etc
     * so re-expand the header to recompute the other derived values. */
    return secp256k1_rangeproof_header_expand(header);
}
178
+
179
+ static int secp256k1_rangeproof_header_serialize (
180
+ unsigned char * proof ,
181
+ size_t plen ,
182
+ size_t * offset ,
183
+ const secp256k1_rangeproof_header * header
184
+ ) {
185
+ * offset = 0 ;
186
+ if (plen < 65 ) {
187
+ return 0 ;
188
+ }
189
+
190
+ /* Write control byte */
191
+ proof [0 ] = (header -> exp >= 0 ? (64 | header -> exp ) : 0 ) | (header -> min_value ? 32 : 0 );
192
+ * offset += 1 ;
193
+ /* Write mantissa, for non-exact-value proofs */
194
+ if (header -> exp >= 0 ) {
195
+ VERIFY_CHECK (header -> mantissa > 0 && header -> mantissa <= 64 );
196
+ proof [1 ] = header -> mantissa - 1 ;
197
+ * offset += 1 ;
198
+ }
199
+ /* Write min_value, if present */
200
+ if (header -> min_value > 0 ) {
201
+ size_t i ;
202
+ for (i = 0 ; i < 8 ; i ++ ) {
203
+ proof [* offset + i ] = (header -> min_value >> ((7 - i ) * 8 )) & 255 ;
204
+ }
205
+ * offset += 8 ;
206
+ }
207
+
208
+ return 1 ;
209
+ }
210
+
112
211
SECP256K1_INLINE static void secp256k1_rangeproof_pub_expand (secp256k1_gej * pubs ,
113
212
int exp , size_t * rsizes , size_t rings , const secp256k1_ge * genp ) {
114
213
secp256k1_gej base ;
@@ -202,88 +301,12 @@ SECP256K1_INLINE static int secp256k1_rangeproof_genrand(secp256k1_scalar *sec,
202
301
return ret ;
203
302
}
204
303
205
/* Legacy (2015-era) derivation of all proof parameters from the caller's
 * requested range settings; removed by this patch in favour of the
 * header-struct helpers above.
 * In/out: min_value, exp and min_bits are adjusted to mutually compatible
 * values. Outputs: v (the blinded value actually proven), rings/rsizes/npub
 * (ring layout), secidx (secret digit index per ring), mantissa (bits
 * proven) and scale (decimal scaling factor applied to v).
 * Returns 0 when value/min_value would overflow the provable range,
 * 1 otherwise. */
SECP256K1_INLINE static int secp256k1_range_proveparams(uint64_t *v, size_t *rings, size_t *rsizes, size_t *npub, size_t *secidx, uint64_t *min_value,
 int *mantissa, uint64_t *scale, int *exp, int *min_bits, uint64_t value) {
    size_t i;
    /* Defaults describe a degenerate single-ring layout, overwritten below. */
    *rings = 1;
    rsizes[0] = 1;
    secidx[0] = 0;
    *scale = 1;
    *mantissa = 0;
    *npub = 0;
    if (*min_value == UINT64_MAX) {
        /* If the minimum value is the maximal representable value, then we cannot code a range. */
        *exp = -1;
    }
    if (*exp >= 0) {
        int max_bits;
        uint64_t v2;
        if ((*min_value && value > INT64_MAX) || (value && *min_value >= INT64_MAX)) {
            /* If either value or min_value is >= 2^63-1 then the other must be zero to avoid overflowing the proven range. */
            return 0;
        }
        /* Cap min_bits by the headroom left above min_value. */
        max_bits = *min_value ? secp256k1_clz64_var(*min_value) : 64;
        if (*min_bits > max_bits) {
            *min_bits = max_bits;
        }
        if (*min_bits > 61 || value > INT64_MAX) {
            /** Ten is not a power of two, so dividing by ten and then representing in base-2 times ten
             * expands the representable range. The verifier requires the proven range is within 0..2**64.
             * For very large numbers (all over 2**63) we must change our exponent to compensate.
             * Rather than handling it precisely, this just disables use of the exponent for big values.
             */
            *exp = 0;
        }
        /* Mask off the least significant digits, as requested. */
        *v = value - *min_value;
        /* If the user has asked for more bits of proof than there is room for in the exponent, reduce the exponent. */
        v2 = *min_bits ? (UINT64_MAX >> (64 - *min_bits)) : 0;
        for (i = 0; (int) i < *exp && (v2 <= UINT64_MAX / 10); i++) {
            *v /= 10;
            v2 *= 10;
        }
        *exp = i;
        /* Reconstruct the scaled value so the public offset can be computed. */
        v2 = *v;
        for (i = 0; (int) i < *exp; i++) {
            v2 *= 10;
            *scale *= 10;
        }
        /* If the masked number isn't precise, compute the public offset. */
        *min_value = value - v2;
        /* How many bits do we need to represent our value? */
        *mantissa = *v ? 64 - secp256k1_clz64_var(*v) : 1;
        if (*min_bits > *mantissa) {
            /* If the user asked for more precision, give it to them. */
            *mantissa = *min_bits;
        }
        /* Digits in radix-4, except for the last digit if our mantissa length is odd. */
        *rings = (*mantissa + 1) >> 1;
        for (i = 0; i < *rings; i++) {
            rsizes[i] = ((i < *rings - 1) | (!(*mantissa & 1))) ? 4 : 2;
            *npub += rsizes[i];
            secidx[i] = (*v >> (i * 2)) & 3;
        }
        VERIFY_CHECK(*mantissa > 0);
        VERIFY_CHECK((*v & ~(UINT64_MAX >> (64 - *mantissa))) == 0); /* Did this get all the bits? */
    } else {
        /* A proof for an exact value. */
        *exp = 0;
        *min_value = value;
        *v = 0;
        *npub = 2;
    }
    VERIFY_CHECK(*v * *scale + *min_value == value);
    VERIFY_CHECK(*rings > 0);
    VERIFY_CHECK(*rings <= 32);
    VERIFY_CHECK(*npub <= 128);
    return 1;
}
281
-
282
304
/* strawman interface, writes proof in proof, a buffer of plen, proves with respect to min_value the range for commit which has the provided blinding factor and value. */
283
305
SECP256K1_INLINE static int secp256k1_rangeproof_sign_impl (const secp256k1_ecmult_gen_context * ecmult_gen_ctx ,
284
306
unsigned char * proof , size_t * plen , uint64_t min_value ,
285
307
const secp256k1_ge * commit , const unsigned char * blind , const unsigned char * nonce , int exp , int min_bits , uint64_t value ,
286
308
const unsigned char * message , size_t msg_len , const unsigned char * extra_commit , size_t extra_commit_len , const secp256k1_ge * genp ){
309
+ secp256k1_rangeproof_header header ;
287
310
secp256k1_gej pubs [128 ]; /* Candidate digits for our proof, most inferred. */
288
311
secp256k1_scalar s [128 ]; /* Signatures in our proof, most forged. */
289
312
secp256k1_scalar sec [32 ]; /* Blinding factors for the correct digits. */
@@ -294,44 +317,43 @@ SECP256K1_INLINE static int secp256k1_rangeproof_sign_impl(const secp256k1_ecmul
294
317
unsigned char tmp [33 ];
295
318
unsigned char * signs ; /* Location of sign flags in the proof. */
296
319
uint64_t v ;
297
- uint64_t scale ; /* scale = 10^exp. */
298
- int mantissa ; /* Number of bits proven in the blinded value. */
299
- size_t rings ; /* How many digits will our proof cover. */
300
- size_t rsizes [32 ]; /* How many possible values there are for each place. */
301
320
size_t secidx [32 ]; /* Which digit is the correct one. */
302
321
size_t len ; /* Number of bytes used so far. */
303
322
size_t i ;
323
+ size_t pub_idx ;
304
324
int overflow ;
305
- size_t npub ;
306
325
len = 0 ;
307
- if (* plen < 65 || min_value > value || min_bits > 64 || min_bits < 0 || exp < -1 || exp > 18 ) {
326
+ if (* plen < 65 ) {
308
327
return 0 ;
309
328
}
310
- if (!secp256k1_range_proveparams (& v , & rings , rsizes , & npub , secidx , & min_value , & mantissa , & scale , & exp , & min_bits , value )) {
329
+
330
+ if (!secp256k1_rangeproof_header_set_for_value (& header , & v , min_value , min_bits , exp , value )) {
311
331
return 0 ;
312
332
}
313
- proof [len ] = (rsizes [0 ] > 1 ? (64 | exp ) : 0 ) | (min_value ? 32 : 0 );
314
- len ++ ;
315
- if (rsizes [0 ] > 1 ) {
316
- VERIFY_CHECK (mantissa > 0 && mantissa <= 64 );
317
- proof [len ] = mantissa - 1 ;
318
- len ++ ;
319
- }
320
- if (min_value ) {
321
- for (i = 0 ; i < 8 ; i ++ ) {
322
- proof [len + i ] = (min_value >> ((7 - i ) * 8 )) & 255 ;
333
+ if (header .exp >= 0 ) {
334
+ for (i = 0 ; i < header .n_rings ; i ++ ) {
335
+ secidx [i ] = (v >> (i * 2 )) & 3 ;
323
336
}
324
- len += 8 ;
337
+ } else {
338
+ secidx [0 ] = 0 ;
325
339
}
340
+
341
+ VERIFY_CHECK (v * header .scale + header .min_value == value );
342
+ VERIFY_CHECK (header .n_rings > 0 );
343
+ VERIFY_CHECK (header .n_rings <= 32 );
344
+ VERIFY_CHECK (header .n_pubs <= 128 );
345
+
346
+ secp256k1_rangeproof_header_serialize (proof , * plen , & len , & header );
347
+
326
348
/* Do we have enough room in the proof for the message? Each ring gives us 128 bytes, but the
327
349
* final ring is used to encode the blinding factor and the value, so we can't use that. (Well,
328
350
* technically there are 64 bytes available if we avoided the other data, but this is difficult
329
351
* because it's not always in the same place. */
330
- if (msg_len > 0 && msg_len > 128 * (rings - 1 )) {
352
+ if (msg_len > 0 && msg_len > 128 * (header . n_rings - 1 )) {
331
353
return 0 ;
332
354
}
333
355
/* Do we have enough room for the proof? */
334
- if (* plen - len < 32 * (npub + rings - 1 ) + 32 + ((rings + 6 ) >> 3 )) {
356
+ if (* plen - len < 32 * (header . n_pubs + header . n_rings - 1 ) + 32 + ((header . n_rings + 6 ) >> 3 )) {
335
357
return 0 ;
336
358
}
337
359
secp256k1_sha256_initialize (& sha256_m );
@@ -346,23 +368,23 @@ SECP256K1_INLINE static int secp256k1_rangeproof_sign_impl(const secp256k1_ecmul
346
368
memcpy (prep , message , msg_len );
347
369
}
348
370
/* Note, the data corresponding to the blinding factors must be zero. */
349
- if (rsizes [rings - 1 ] > 1 ) {
371
+ if (header . rsizes [header . n_rings - 1 ] > 1 ) {
350
372
size_t idx ;
351
373
/* Value encoding sidechannel. */
352
- idx = rsizes [rings - 1 ] - 1 ;
353
- idx -= secidx [rings - 1 ] == idx ;
354
- idx = ((rings - 1 ) * 4 + idx ) * 32 ;
374
+ idx = header . rsizes [header . n_rings - 1 ] - 1 ;
375
+ idx -= secidx [header . n_rings - 1 ] == idx ;
376
+ idx = ((header . n_rings - 1 ) * 4 + idx ) * 32 ;
355
377
for (i = 0 ; i < 8 ; i ++ ) {
356
378
prep [8 + i + idx ] = prep [16 + i + idx ] = prep [24 + i + idx ] = (v >> (56 - i * 8 )) & 255 ;
357
379
prep [i + idx ] = 0 ;
358
380
}
359
381
prep [idx ] = 128 ;
360
382
}
361
- if (!secp256k1_rangeproof_genrand (sec , s , prep , rsizes , rings , nonce , commit , proof , len , genp )) {
383
+ if (!secp256k1_rangeproof_genrand (sec , s , prep , header . rsizes , header . n_rings , nonce , commit , proof , len , genp )) {
362
384
return 0 ;
363
385
}
364
386
memset (prep , 0 , 4096 );
365
- for (i = 0 ; i < rings ; i ++ ) {
387
+ for (i = 0 ; i < header . n_rings ; i ++ ) {
366
388
/* Sign will overwrite the non-forged signature, move that random value into the nonce. */
367
389
k [i ] = s [i * 4 + secidx [i ]];
368
390
secp256k1_scalar_clear (& s [i * 4 + secidx [i ]]);
@@ -374,50 +396,51 @@ SECP256K1_INLINE static int secp256k1_rangeproof_sign_impl(const secp256k1_ecmul
374
396
* blinded value for one digit.
375
397
*/
376
398
secp256k1_scalar_set_b32 (& stmp , blind , & overflow );
377
- secp256k1_scalar_add (& sec [rings - 1 ], & sec [rings - 1 ], & stmp );
378
- if (overflow || secp256k1_scalar_is_zero (& sec [rings - 1 ])) {
399
+ secp256k1_scalar_add (& sec [header . n_rings - 1 ], & sec [header . n_rings - 1 ], & stmp );
400
+ if (overflow || secp256k1_scalar_is_zero (& sec [header . n_rings - 1 ])) {
379
401
return 0 ;
380
402
}
381
403
signs = & proof [len ];
382
404
/* We need one sign bit for each blinded value we send. */
383
- for (i = 0 ; i < (rings + 6 ) >> 3 ; i ++ ) {
405
+ for (i = 0 ; i < (header . n_rings + 6 ) >> 3 ; i ++ ) {
384
406
signs [i ] = 0 ;
385
407
len ++ ;
386
408
}
387
- npub = 0 ;
388
- for (i = 0 ; i < rings ; i ++ ) {
409
+ pub_idx = 0 ;
410
+ for (i = 0 ; i < header . n_rings ; i ++ ) {
389
411
/*OPT: Use the precomputed gen2 basis?*/
390
- secp256k1_pedersen_ecmult (ecmult_gen_ctx , & pubs [npub ], & sec [i ], ((uint64_t )secidx [i ] * scale ) << (i * 2 ), genp );
391
- if (secp256k1_gej_is_infinity (& pubs [npub ])) {
412
+ secp256k1_pedersen_ecmult (ecmult_gen_ctx , & pubs [pub_idx ], & sec [i ], ((uint64_t )secidx [i ] * header . scale ) << (i * 2 ), genp );
413
+ if (secp256k1_gej_is_infinity (& pubs [pub_idx ])) {
392
414
return 0 ;
393
415
}
394
- if (i < rings - 1 ) {
416
+ if (i < header . n_rings - 1 ) {
395
417
unsigned char tmpc [33 ];
396
418
secp256k1_ge c ;
397
419
unsigned char quadness ;
398
420
/*OPT: split loop and batch invert.*/
399
- /*OPT: do not compute full pubs[npub ] in ge form; we only need x */
400
- secp256k1_ge_set_gej_var (& c , & pubs [npub ]);
421
+ /*OPT: do not compute full pubs[pub_idx ] in ge form; we only need x */
422
+ secp256k1_ge_set_gej_var (& c , & pubs [pub_idx ]);
401
423
secp256k1_rangeproof_serialize_point (tmpc , & c );
402
424
quadness = tmpc [0 ];
403
425
secp256k1_sha256_write (& sha256_m , tmpc , 33 );
404
426
signs [i >>3 ] |= quadness << (i & 7 );
405
427
memcpy (& proof [len ], tmpc + 1 , 32 );
406
428
len += 32 ;
407
429
}
408
- npub += rsizes [i ];
430
+ pub_idx += header . rsizes [i ];
409
431
}
410
- secp256k1_rangeproof_pub_expand (pubs , exp , rsizes , rings , genp );
432
+ VERIFY_CHECK (pub_idx == header .n_pubs );
433
+ secp256k1_rangeproof_pub_expand (pubs , header .exp , header .rsizes , header .n_rings , genp );
411
434
if (extra_commit != NULL ) {
412
435
secp256k1_sha256_write (& sha256_m , extra_commit , extra_commit_len );
413
436
}
414
437
secp256k1_sha256_finalize (& sha256_m , tmp );
415
- if (!secp256k1_borromean_sign (ecmult_gen_ctx , & proof [len ], s , pubs , k , sec , rsizes , secidx , rings , tmp , 32 )) {
438
+ if (!secp256k1_borromean_sign (ecmult_gen_ctx , & proof [len ], s , pubs , k , sec , header . rsizes , secidx , header . n_rings , tmp , 32 )) {
416
439
return 0 ;
417
440
}
418
441
len += 32 ;
419
- for (i = 0 ; i < npub ; i ++ ) {
420
- secp256k1_scalar_get_b32 (& proof [len ],& s [i ]);
442
+ for (i = 0 ; i < pub_idx ; i ++ ) {
443
+ secp256k1_scalar_get_b32 (& proof [len ], & s [i ]);
421
444
len += 32 ;
422
445
}
423
446
VERIFY_CHECK (len <= * plen );
0 commit comments