13
13
#include "modules/bulletproofs/main_impl.h"
14
14
#include "modules/bulletproofs/util.h"
15
15
16
+ /* Number of scalars that should remain at the end of a recursive proof. The paper
17
+ * uses 2, by reducing the scalars as far as possible. We stop one recursive step
18
+ * early, trading two points (L, R) for two scalars, which reduces verification
19
+ * and prover cost.
20
+ *
21
+ * For the most part, all comments assume this value is at 4.
22
+ */
16
23
#define IP_AB_SCALARS 4
17
24
25
+ /* Bulletproof inner products consist of the four scalars and `2[log2(n) - 1]` points
26
+ * `a_1`, `a_2`, `b_1`, `b_2`, `L_i` and `R_i`, where `i` ranges from 0 to `log2(n)-1`.
27
+ *
28
+ * The prover takes as input a point `P` and scalar `c`. It proves that it knows
29
+ * scalars `a_i`, `b_i` for `i` ranging from 1 to `n`, such that
30
+ * `P = sum_i [a_i G_i + b_i H_i]` and `<{a_i}, {b_i}> = c`,
31
+ * where `G_i` and `H_i` are standard NUMS generators.
32
+ *
33
+ * Verification of the proof comes down to a single multiexponentiation of the form
34
+ *
35
+ * P + (c - a_1*b_1 - a_2*b_2)*x*G
36
+ * - sum_{i=1}^n [s'_i*G_i + s_i*H_i]
37
+ * + sum_{i=1}^log2(n) [x_i^-2 L_i + x_i^2 R_i]
38
+ *
39
+ * which will equal infinity if the inner product proof is correct. Here
40
+ * - `G` is the standard secp generator
41
+ * - `x` is a hash of `commit` and is used to rerandomize `c`. See Protocol 2 vs Protocol 1 in the paper.
42
+ * - `x_i = H(x_{i-1} || L_i || R_i)`, where `x_{-1}` is passed through the `commit` variable and
43
+ * must be a commitment to `P` and `c`.
44
+ * - `s_i` and `s'_i` are computed as follows.
45
+ *
46
+ * Letting `i_j` be defined as 1 if `i & 2^j == 1`, and -1 otherwise,
47
+ * - For `i` from `1` to `n/2`, `s'_i = a_1 * prod_{j=1}^log2(n) x_j^i_j`
48
+ * - For `i` from `n/2 + 1` to `n`, `s'_i = a_2 * prod_{j=1}^log2(n) x_j^i_j`
49
+ * - For `i` from `1` to `n/2`, `s_i = b_1 * prod_{j=1}^log2(n) x_j^-i_j`
50
+ * - For `i` from `n/2 + 1` to `n`, `s_i = b_2 * prod_{j=1}^log2(n) x_j^-i_j`
51
+ *
52
+ * Observe that these can be computed iteratively by labelling the coefficients `s_i` for `i`
53
+ * from `0` to `2n-1` rather than 1-indexing and distinguishing between `s_i'`s and `s_i`s:
54
+ *
55
+ * Start with `s_0 = a_1 * prod_{j=1}^log2(n) x_j^-1`, then for later `s_i`s,
56
+ * - For `i` from `1` to `n/2 - 1`, multiply some earlier `s'_j` by some `x_k^2`
57
+ * - For `i = n/2`, multiply `s_{i-1}` by `a_2/a_1`.
58
+ * - For `i` from `n/2 + 1` to `n - 1`, multiply some earlier `s'_j` by some `x_k^2`
59
+ * - For `i = n`, multiply `s'_{i-1}` by `b_1/a_2` to get `s_i`.
60
+ * - For `i` from `n + 1` to `3n/2 - 1`, multiply some earlier `s_j` by some `x_k^-2`
61
+ * - For `i = 3n/2`, multiply `s_{i-1}` by `b_2/b_1`.
62
+ * - For `i` from `3n/2 + 1` to `2n - 1`, multiply some earlier `s_j` by some `x_k^-2`
63
+ * where of course, the indices `j` and `k` must be chosen carefully.
64
+ *
65
+ * The bulk of `secp256k1_bulletproof_innerproduct_vfy_ecmult_callback` involves computing
66
+ * these indices, given `a_2/a_1`, `b_1/a_1`, `b_2/b_1`, and the `x_k^2`s as input. It
67
+ * computes `x_k^-2` as a side-effect of its other computation.
68
+ */
69
+
18
70
typedef int (secp256k1_bulletproof_vfy_callback )(secp256k1_scalar * sc , secp256k1_ge * pt , secp256k1_scalar * randomizer , size_t idx , void * data );
19
71
20
72
/* used by callers to wrap a proof with surrounding context */
@@ -64,111 +116,116 @@ size_t secp256k1_bulletproof_innerproduct_proof_length(size_t n) {
64
116
}
65
117
}
66
118
67
- /* Bulletproof rangeproof verification comes down to a single multiexponentiation of the form
68
- *
69
- * P + (c-a*b)*x*G - sum_{i=1}^n [a*s'_i*G_i + b*s_i*H_i] + sum_{i=1}^log2(n) [x_i^-2 L_i + x_i^2 R_i
70
- *
71
- * which will equal infinity if the rangeproof is correct. Here
72
- * - `G_i` and `H_i` are standard NUMS generators. `G` is the standard secp256k1 generator.
73
- * - `P` and `c` are inputs to the proof, which claims that there exist `a_i` and `b_i`, `i` ranging
74
- * from 0 to `n-1`, such that `P = sum_i [a_i G_i + b_i H_i]` and that `<{a_i}, {b_i}> = c`.
75
- * - `a`, `b`, `L_i` and `R_i`are auxillary components of the proof, where `i` ranges from 0 to `log2(n)-1`.
76
- * - `x_i = H(x_{i-1} || L_i || R_i)`, where `x_{-1}` is passed through the `commit` variable and
77
- * must be a commitment to `P` and `c`.
78
- * - `x` is a hash of `commit` and is used to rerandomize `c`. See Protocol 2 vs Protocol 1 in the paper.
79
- * - `s_i` and `s'_i` are computed as follows.
80
- *
81
- * For each `i` between 0 and `n-1` inclusive, let `b_{ij}` be -1 (1) if the `j`th bit of `i` is zero (one).
82
- * Here `j` ranges from 0 to `log2(n)-1`. Then for each such `i` we define
83
- * - `s_i = prod_j x_j^{b_{ij}}`
84
- * - `s'_i = 1/s_i`
85
- *
86
- * Alternately we can define `s_i` and `s'_i` recursively as follows:
87
- * - `s_0 = s`_{n - 1} = 1 / prod_j x_j`
88
- * - `s_i = s'_{n - 1 - i} = s_{i - 2^j} * x_j^2` where `j = i & (i - 1)` is `i` with its least significant 1 set to 0.
89
- *
90
- * Our ecmult_multi function takes `(c - a*b)*x` directly and multiplies this by `G`. For every other
119
+ /* Our ecmult_multi function takes `(c - a*b)*x` directly and multiplies this by `G`. For every other
91
120
* (scalar, point) pair it calls the following callback function, which takes an index and outputs a
92
121
* pair. The function therefore has three regimes:
93
122
*
94
- * For the first `2n` invocations, it alternately returns `(s'_{n - i}, G_{n - i})` and `(s_i, H_i)`,
95
- * where `i` is `floor(idx / 2)`. The reason for the funny indexing is that we use the above recursive
96
- * definition of `s_i` and `s'_i` which produces each element with only a single scalar multiplication,
97
- * but in this mixed order. (We start with an array of `x_j^2` for each `x_j`.)
98
- *
99
- * As a side-effect, whenever `n - i = 2^j` for some `j`, `s_i = x_j^{-1} * prod_{j' != j} x_{j'}`,
100
- * so `x_j^{-2} = s_i*s_0`. Therefore we compute an array of inverse squares during this computation,
101
- * using only one multiplication per. We will need it in the following step.
102
- *
103
- * For the next `2*log2(n)` invocations it alternately returns `(x_i^-2, L_i)` and `(x_i^2, R_i)`
104
- * where `i` is `idx - 2*n`.
123
+ * For the first `n` invocations, it returns `(s'_i, G_i)` for `i` from 1 to `n`.
124
+ * For the next `n` invocations, it returns `(s_i, H_i)` for `i` from 1 to `n`.
125
+ * For the next `2*log2(n)` invocations it returns `(x_i^-2, L_i)` and `(x_i^2, R_i)`,
126
+ * alternating between the two choices, for `i` from 1 to `log2(n)`.
105
127
*
106
128
* For the remaining invocations it passes through to another callback, `rangeproof_cb_data` which
107
129
* computes `P`. The reason for this is that in practice `P` is usually defined by another multiexp
108
130
* rather than being a known point, and it is more efficient to compute one exponentiation.
109
131
*
132
+ * Inline we refer to the first `2n` coefficients as `s_i` for `i` from 0 to `2n-1`, since that
133
+ * is the more convenient indexing. In particular we describe (a) how the indices `j` and `k`,
134
+ * from the big comment block above, are chosen; and (b) when/how each `x_k^-2` is computed.
110
135
*/
111
-
112
- /* For the G and H generators, we choose the ith generator with a scalar computed from the
113
- * L/R hashes as follows: prod_{j=1}^m x_j^{e_j}, where each exponent e_j is either -1 or 1.
114
- * The choice directly maps to the bits of i: for the G generators, a 0 bit means e_j is 1
115
- * and a 1 bit means e_j is -1. For the H generators it is the opposite. Finally, each of the
116
- * G scalars is further multiplied by -a, while each of the H scalars is further multiplied
117
- * by -b.
118
- *
119
- * These scalars are computed starting from I, the inverse of the product of every x_j, which
120
- * is then selectively multiplied by x_j^2 for whichever j's are needed. As it turns out, by
121
- * caching logarithmically many scalars, this can always be done by multiplying one of the
122
- * cached values by a single x_j, rather than starting from I and doing multiple multiplications.
123
- */
124
-
125
136
static int secp256k1_bulletproof_innerproduct_vfy_ecmult_callback (secp256k1_scalar * sc , secp256k1_ge * pt , size_t idx , void * data ) {
126
137
secp256k1_bulletproof_innerproduct_vfy_ecmult_context * ctx = (secp256k1_bulletproof_innerproduct_vfy_ecmult_context * ) data ;
127
138
128
- /* First 2N points use the standard Gi, Hi generators, and the scalars can be aggregated across proofs */
139
+ /* First 2N points use the standard Gi, Hi generators, and the scalars can be aggregated across proofs.
140
+ * Inside this if clause, `idx` corresponds to the index `i` in the big comment, and runs from 0 to `2n-1`.
141
+ * Also `ctx->vec_len` corresponds to `n`. */
129
142
if (idx < 2 * ctx -> vec_len ) {
143
+ /* Number of `a` scalars in the proof (same as number of `b` scalars in the proof). Will
144
+ * be 2 except for very small proofs that have fewer than 2 scalars as input. */
130
145
const size_t grouping = ctx -> vec_len < IP_AB_SCALARS / 2 ? ctx -> vec_len : IP_AB_SCALARS / 2 ;
131
146
const size_t lg_grouping = secp256k1_floor_lg (grouping );
132
147
size_t i ;
133
- /* TODO zero this point when appropriate for non-2^n numbers of pairs */
148
+ VERIFY_CHECK (lg_grouping == 0 || lg_grouping == 1 ); /* TODO support higher IP_AB_SCALARS */
149
+
150
+ /* Determine whether we're multiplying by `G_i`s or `H_i`s. */
134
151
if (idx < ctx -> vec_len ) {
135
152
* pt = ctx -> geng [idx ];
136
153
} else {
137
154
* pt = ctx -> genh [idx - ctx -> vec_len ];
138
155
}
139
156
140
157
secp256k1_scalar_clear (sc );
158
+ /* Loop over all the different inner product proofs we might be doing at once. Since they
159
+ * share generators `G_i` and `H_i`, we compute all of their scalars at once and add them.
160
+ * For each proof we start with the "seed value" `ctx->proof[i].xcache[0]` (see next comment
161
+ * for its meaning) from which every other scalar is derived. We expect the caller to have
162
+ * randomized this to ensure that this wanton addition cannot enable cancellation attacks.
163
+ */
141
164
for (i = 0 ; i < ctx -> n_proofs ; i ++ ) {
165
+ /* To recall from the introductory comment: most `s_i` values are computed by taking an
166
+ * earlier `s_j` value and multiplying it by some `x_k^2`.
167
+ *
168
+ * We now explain the index `j`: it is the largest number with one fewer 1-bits than `i`.
169
+ * Alternately, the most recently returned `s_j` where `j` has one fewer 1-bits than `i`.
170
+ *
171
+ * To ensure that `s_j` is available when we need it, on each iteration we define the
172
+ * variable `cache_idx` which simply counts the 1-bits in `i`; before returning `s_i`
173
+ * we store it in `ctx->proof[i].xcache[cache_idx]`. Then later, when we want "most
174
+ * recently returned `s_j` with one fewer 1-bits than `i`, it'll be sitting right
175
+ * there in `ctx->proof[i].xcache[cache_idx - 1]`.
176
+ *
177
+ * Note that `ctx->proof[i].xcache[0]` will always equal `-a_1 * prod_{i=1}^{n-1} x_i^-2`,
178
+ * and we expect the caller to have set this.
179
+ */
142
180
const size_t cache_idx = secp256k1_popcountl (idx );
143
181
secp256k1_scalar term ;
144
182
VERIFY_CHECK (cache_idx < SECP256K1_BULLETPROOF_MAX_DEPTH );
145
- /* Compute the normal inner-product scalar.. . */
183
+ /* For the special case `cache_idx == 0` (which is true iff `idx == 0`) there is nothing to do . */
146
184
if (cache_idx > 0 ) {
185
+ /* Otherwise, check if this is one of the special indices where we transition from `a_1` to `a_2`,
186
+ * from `a_2` to `b_1`, or from `b_1` to `b_2`. (For small proofs there is only one transition,
187
+ * from `a` to `b`.) */
147
188
if (idx % (ctx -> vec_len / grouping ) == 0 ) {
148
189
const size_t abinv_idx = idx / (ctx -> vec_len / grouping ) - 1 ;
149
190
size_t prev_cache_idx ;
191
+ /* Check if it's the even specialer index where we're transitioning from `a`s to `b`s, from
192
+ * `G`s to `H`s, and from `x_k^2`s to `x_k^-2`s. In rangeproof and circuit applications,
193
+ * the caller secretly has a variable `y` such that `H_i` is really `y^-i H_i` for `i` ranging
194
+ * from 0 to `n-1`. Rather than forcing the caller to tweak every `H_i` herself, which would
195
+ * be very slow and prevent precomputation, we instead multiply our cached `x_k^-2` values
196
+ * by `y^(-2^k)` respectively, which will ultimately result in every `s_i` we return having
197
+ * been multiplied by `y^-i`.
198
+ *
199
+ * This is an underhanded trick but the result is that all `n` powers of `y^-i` show up
200
+ * in the right place, and we only need log-many scalar squarings and multiplications.
201
+ */
150
202
if (idx == ctx -> vec_len ) {
151
- /* Transition from G to H, a's to b's */
152
203
secp256k1_scalar yinvn = ctx -> proof [i ].proof -> yinv ;
153
204
size_t j ;
154
205
prev_cache_idx = secp256k1_popcountl (idx - 1 );
155
206
for (j = 0 ; j < (size_t ) secp256k1_ctzl (idx ) - lg_grouping ; j ++ ) {
156
207
secp256k1_scalar_mul (& ctx -> proof [i ].xsqinvy [j ], & ctx -> proof [i ].xsqinv [j ], & yinvn );
157
208
secp256k1_scalar_sqr (& yinvn , & yinvn );
158
209
}
159
- for (j = 0 ; j < lg_grouping ; j ++ ) {
160
- /* TODO this only does the right thing for lg_grouping = 0 or 1 */
210
+ if (lg_grouping == 1 ) {
161
211
secp256k1_scalar_mul (& ctx -> proof [i ].abinv [2 ], & ctx -> proof [i ].abinv [2 ], & yinvn );
162
212
secp256k1_scalar_sqr (& yinvn , & yinvn );
163
213
}
164
214
} else {
165
215
prev_cache_idx = cache_idx - 1 ;
166
216
}
217
+ /* Regardless of specialness, we multiply by `a_2/a_1` or whatever the appropriate multiplier
218
+ * is. We expect the caller to have given these to us in the `ctx->proof[i].abinv` array. */
167
219
secp256k1_scalar_mul (
168
220
& ctx -> proof [i ].xcache [cache_idx ],
169
221
& ctx -> proof [i ].xcache [prev_cache_idx ],
170
222
& ctx -> proof [i ].abinv [abinv_idx ]
171
223
);
224
+ /* If it's *not* a special index, just multiply by the appropriate `x_k^2`, or `x_k^-2` in case
225
+ * we're in the `H_i` half of the multiexp. At this point we can explain the index `k`, which
226
+ * is computed in the variable `xsq_idx` (`xsqinv_idx` respectively). In light of our discussion
227
+ * of `j`, we see that this should be "the least significant bit that's 1 in `i` but not `i-1`."
228
+ * In other words, it is the number of trailing 0 bits in the index `i`. */
172
229
} else if (idx < ctx -> vec_len ) {
173
230
const size_t xsq_idx = secp256k1_ctzl (idx );
174
231
secp256k1_scalar_mul (& ctx -> proof [i ].xcache [cache_idx ], & ctx -> proof [i ].xcache [cache_idx - 1 ], & ctx -> proof [i ].xsq [xsq_idx ]);
@@ -179,14 +236,19 @@ static int secp256k1_bulletproof_innerproduct_vfy_ecmult_callback(secp256k1_scal
179
236
}
180
237
term = ctx -> proof [i ].xcache [cache_idx ];
181
238
182
- /* When going through the G generators, compute the x-inverses as side effects */
183
- if (idx < ctx -> vec_len / grouping && secp256k1_popcountl (idx ) == ctx -> lg_vec_len - 1 ) { /* if the scalar has only one 0, i.e. only one inverse... */
239
+ /* One last trick: compute `x_k^-2` while computing the `G_i` scalars, so that they'll be
240
+ * available when we need them for the `H_i` scalars. We can do this for every `i` value
241
+ * that has exactly one 0-bit, i.e. which is a product of all `x_i`s and one `x_k^-1`. By
242
+ * multiplying that by the special value `prod_{i=1}^n x_i^-1` we obtain simply `x_k^-2`.
243
+ * We expect the caller to give us this special value in `ctx->proof[i].xsqinv_mask`. */
244
+ if (idx < ctx -> vec_len / grouping && secp256k1_popcountl (idx ) == ctx -> lg_vec_len - 1 ) {
184
245
const size_t xsqinv_idx = secp256k1_ctzl (~idx );
185
- /* ...multiply it by the total inverse, to get x_j^-2 */
186
246
secp256k1_scalar_mul (& ctx -> proof [i ].xsqinv [xsqinv_idx ], & ctx -> proof [i ].xcache [cache_idx ], & ctx -> proof [i ].xsqinv_mask );
187
247
}
188
248
189
- /* ...add whatever offset the rangeproof wants... */
249
+ /* Finally, if the caller, in its computation of `P`, wants to multiply `G_i` or `H_i` by some scalar,
250
+ * we add that to our sum as well. Again, we trust the randomization in `xcache[0]` to prevent any
251
+ * cancellation attacks here. */
190
252
if (ctx -> proof [i ].proof -> rangeproof_cb != NULL ) {
191
253
secp256k1_scalar rangeproof_offset ;
192
254
if ((ctx -> proof [i ].proof -> rangeproof_cb )(& rangeproof_offset , NULL , & ctx -> randomizer [i ], idx , ctx -> proof [i ].proof -> rangeproof_cb_data ) != 1 ) {
@@ -416,7 +478,7 @@ static int secp256k1_bulletproof_inner_product_verify_impl(const secp256k1_ecmul
416
478
for (j = n_ab - 1 ; j > 0 ; j -- ) {
417
479
size_t prev_idx ;
418
480
if (j == n_ab / 2 ) {
419
- prev_idx = j - 1 ; /* we go from a_n to b_0 */
481
+ prev_idx = j - 1 ; /* we go from a_{n-1} to b_0 */
420
482
} else {
421
483
prev_idx = j & (j - 1 ); /* but from a_i' to a_i, where i' is i with its lowest set bit unset */
422
484
}
@@ -702,7 +764,7 @@ static int secp256k1_bulletproof_inner_product_prove_impl(const secp256k1_ecmult
702
764
}
703
765
* proof_len = secp256k1_bulletproof_innerproduct_proof_length (n );
704
766
705
- /* Special-case lengths 0 and 1 whose proofs are just expliict lists of scalars */
767
+ /* Special-case lengths 0 and 1 whose proofs are just explicit lists of scalars */
706
768
if (n <= IP_AB_SCALARS / 2 ) {
707
769
secp256k1_scalar a [IP_AB_SCALARS / 2 ];
708
770
secp256k1_scalar b [IP_AB_SCALARS / 2 ];
0 commit comments