ecmult_impl.h
/******************************************************************************
 * Copyright (c) 2013, 2014, 2017 Pieter Wuille, Andrew Poelstra, Jonas Nick *
 * Distributed under the MIT software license, see the accompanying          *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.      *
 ******************************************************************************/

#ifndef SECP256K1_ECMULT_IMPL_H
#define SECP256K1_ECMULT_IMPL_H

#include <string.h>
#include <stdint.h>

#include "util.h"
#include "group.h"
#include "scalar.h"
#include "ecmult.h"
#include "precomputed_ecmult.h"

#if defined(EXHAUSTIVE_TEST_ORDER)
/* We need to lower these values for exhaustive tests because
 * the tables cannot have infinities in them (this breaks the
 * affine-isomorphism stuff which tracks z-ratios) */
#  if EXHAUSTIVE_TEST_ORDER > 128
#    define WINDOW_A 5
#  elif EXHAUSTIVE_TEST_ORDER > 8
#    define WINDOW_A 4
#  else
#    define WINDOW_A 2
#  endif
#else
/* optimal for 128-bit and 256-bit exponents. */
#  define WINDOW_A 5
/** Larger values for ECMULT_WINDOW_SIZE result in possibly better
 *  performance at the cost of an exponentially larger precomputed
 *  table. The exact table size is
 *      (1 << (WINDOW_G - 2)) * sizeof(secp256k1_ge_storage)
 *  bytes, where sizeof(secp256k1_ge_storage) is typically 64 bytes but can
 *  be larger due to platform-specific padding and alignment.
 *  Two tables of this size are used (due to the endomorphism
 *  optimization).
 */
#endif

#define WNAF_BITS 128
#define WNAF_SIZE_BITS(bits, w) CEIL_DIV(bits, w)
#define WNAF_SIZE(w) WNAF_SIZE_BITS(WNAF_BITS, w)

/* The number of objects allocated on the scratch space for ecmult_multi algorithms */
#define PIPPENGER_SCRATCH_OBJECTS 6
#define STRAUSS_SCRATCH_OBJECTS 5

#define PIPPENGER_MAX_BUCKET_WINDOW 12

/* Minimum number of points for which pippenger_wnaf is faster than strauss wnaf */
#define ECMULT_PIPPENGER_THRESHOLD 88

#define ECMULT_MAX_POINTS_PER_BATCH 5000000

/** Fill a table 'pre_a' with precomputed odd multiples of a.
 *  pre_a will contain [1*a,3*a,...,(2*n-1)*a], so it needs space for n group elements.
 *  zr needs space for n field elements.
 *
 *  Although pre_a is an array of _ge rather than _gej, it actually represents elements
 *  in Jacobian coordinates with their z coordinates omitted. The omitted z-coordinates
 *  can be recovered using z and zr. Using the notation z(b) to represent the omitted
 *  z coordinate of b:
 *  - z(pre_a[n-1]) = 'z'
 *  - z(pre_a[i-1]) = z(pre_a[i]) / zr[i] for n > i > 0
 *
 *  Lastly the zr[0] value, which isn't used above, is set so that:
 *  - a.z = z(pre_a[0]) / zr[0]
 */
static void secp256k1_ecmult_odd_multiples_table(size_t n, secp256k1_ge *pre_a, secp256k1_fe *zr, secp256k1_fe *z, const secp256k1_gej *a) {
    secp256k1_gej d, ai;
    secp256k1_ge d_ge;
    size_t i;

    VERIFY_CHECK(!secp256k1_gej_is_infinity(a));

    secp256k1_gej_double_var(&d, a, NULL);

    /*
     * Perform the additions using an isomorphic curve Y^2 = X^3 + 7*C^6 where C := d.z.
     * The isomorphism, phi, maps a secp256k1 point (x, y) to the point (x*C^2, y*C^3) on the other curve.
     * In Jacobian coordinates phi maps (x, y, z) to (x*C^2, y*C^3, z) or, equivalently, to (x, y, z/C).
     *
     *     phi(x, y, z) = (x*C^2, y*C^3, z) = (x, y, z/C)
     *     d_ge := phi(d) = (d.x, d.y, 1)
     *     ai   := phi(a) = (a.x*C^2, a.y*C^3, a.z)
     *
     * The group addition functions work correctly on these isomorphic curves.
     * In particular phi(d) is easy to represent in affine coordinates under this isomorphism.
     * This lets us use the faster secp256k1_gej_add_ge_var group addition function that we wouldn't be able to use otherwise.
     */
    secp256k1_ge_set_xy(&d_ge, &d.x, &d.y);
    secp256k1_ge_set_gej_zinv(&pre_a[0], a, &d.z);
    secp256k1_gej_set_ge(&ai, &pre_a[0]);
    ai.z = a->z;

    /* pre_a[0] is the point (a.x*C^2, a.y*C^3, a.z*C) which is equivalent to a.
     * Set zr[0] to C, which is the ratio between the omitted z(pre_a[0]) value and a.z.
     */
    zr[0] = d.z;

    for (i = 1; i < n; i++) {
        secp256k1_gej_add_ge_var(&ai, &ai, &d_ge, &zr[i]);
        secp256k1_ge_set_xy(&pre_a[i], &ai.x, &ai.y);
    }

    /* Multiply the last z-coordinate by C to undo the isomorphism.
     * Since the z-coordinates of the pre_a values are implied by the zr array of z-coordinate ratios,
     * undoing the isomorphism here undoes the isomorphism for all pre_a values.
     */
    secp256k1_fe_mul(z, &ai.z, &d.z);
}

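/* Illustrative sketch (not part of the library): a caller holding the outputs
 * of secp256k1_ecmult_odd_multiples_table could materialize the omitted
 * z-coordinates by walking the ratio recurrence backwards from 'z':
 *
 *     secp256k1_fe zs[ECMULT_TABLE_SIZE(WINDOW_A)];  hypothetical buffer, size >= n
 *     size_t i;
 *     zs[n-1] = *z;
 *     for (i = n - 1; i > 0; i--) {
 *         secp256k1_fe zr_inv;
 *         secp256k1_fe_inv_var(&zr_inv, &zr[i]);        since z(pre_a[i-1]) = z(pre_a[i]) / zr[i]
 *         secp256k1_fe_mul(&zs[i-1], &zs[i], &zr_inv);
 *     }
 *
 * The library never does this; instead secp256k1_ecmult_strauss_wnaf below
 * uses secp256k1_ge_table_set_globalz to bring the whole table to a single
 * global Z using only the ratios, without any field inversions.
 */
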
SECP256K1_INLINE static void secp256k1_ecmult_table_verify(int n, int w) {
    (void)n;
    (void)w;
    VERIFY_CHECK(((n) & 1) == 1);
    VERIFY_CHECK((n) >= -((1 << ((w)-1)) - 1));
    VERIFY_CHECK((n) <= ((1 << ((w)-1)) - 1));
}

SECP256K1_INLINE static void secp256k1_ecmult_table_get_ge(secp256k1_ge *r, const secp256k1_ge *pre, int n, int w) {
    secp256k1_ecmult_table_verify(n,w);
    if (n > 0) {
        *r = pre[(n-1)/2];
    } else {
        *r = pre[(-n-1)/2];
        secp256k1_fe_negate(&(r->y), &(r->y), 1);
    }
}

SECP256K1_INLINE static void secp256k1_ecmult_table_get_ge_lambda(secp256k1_ge *r, const secp256k1_ge *pre, const secp256k1_fe *x, int n, int w) {
    secp256k1_ecmult_table_verify(n,w);
    if (n > 0) {
        secp256k1_ge_set_xy(r, &x[(n-1)/2], &pre[(n-1)/2].y);
    } else {
        secp256k1_ge_set_xy(r, &x[(-n-1)/2], &pre[(-n-1)/2].y);
        secp256k1_fe_negate(&(r->y), &(r->y), 1);
    }
}

SECP256K1_INLINE static void secp256k1_ecmult_table_get_ge_storage(secp256k1_ge *r, const secp256k1_ge_storage *pre, int n, int w) {
    secp256k1_ecmult_table_verify(n,w);
    if (n > 0) {
        secp256k1_ge_from_storage(r, &pre[(n-1)/2]);
    } else {
        secp256k1_ge_from_storage(r, &pre[(-n-1)/2]);
        secp256k1_fe_negate(&(r->y), &(r->y), 1);
    }
}

/** Convert a number to WNAF notation. The number becomes represented by sum(2^i * wnaf[i], i=0..bits),
 *  with the following guarantees:
 *  - each wnaf[i] is either 0, or an odd integer between -((1 << (w-1)) - 1) and ((1 << (w-1)) - 1)
 *  - two non-zero entries in wnaf are separated by at least w-1 zeroes.
 *  - the return value is the number of significant entries in wnaf, i.e. one more than the
 *    position of the highest non-zero entry. It is at most 256, and at most one more than
 *    the number of bits in the absolute value of the input.
 */
static int secp256k1_ecmult_wnaf(int *wnaf, int len, const secp256k1_scalar *a, int w) {
    secp256k1_scalar s;
    int last_set_bit = -1;
    int bit = 0;
    int sign = 1;
    int carry = 0;

    VERIFY_CHECK(wnaf != NULL);
    VERIFY_CHECK(0 <= len && len <= 256);
    VERIFY_CHECK(a != NULL);
    VERIFY_CHECK(2 <= w && w <= 31);

    for (bit = 0; bit < len; bit++) {
        wnaf[bit] = 0;
    }

    s = *a;
    if (secp256k1_scalar_get_bits_limb32(&s, 255, 1)) {
        secp256k1_scalar_negate(&s, &s);
        sign = -1;
    }

    bit = 0;
    while (bit < len) {
        int now;
        int word;
        if (secp256k1_scalar_get_bits_limb32(&s, bit, 1) == (unsigned int)carry) {
            bit++;
            continue;
        }

        now = w;
        if (now > len - bit) {
            now = len - bit;
        }

        word = secp256k1_scalar_get_bits_var(&s, bit, now) + carry;

        carry = (word >> (w-1)) & 1;
        word -= carry << w;

        wnaf[bit] = sign * word;
        last_set_bit = bit;

        bit += now;
    }
#ifdef VERIFY
    {
        int verify_bit = bit;

        VERIFY_CHECK(carry == 0);

        while (verify_bit < 256) {
            VERIFY_CHECK(secp256k1_scalar_get_bits_limb32(&s, verify_bit, 1) == 0);
            verify_bit++;
        }
    }
#endif
    return last_set_bit + 1;
}

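/* Worked example (illustrative only): for the scalar 13 = 0b1101 and w = 4,
 * the lowest window is 1101 = 13, whose top bit is set, so 2^4 is borrowed
 * from the next window: wnaf[0] = 13 - 16 = -3 with carry 1. The carry turns
 * the next (otherwise empty) window into 1, giving wnaf[4] = 1. The result is
 * wnaf = {-3, 0, 0, 0, 1}, i.e. 13 = -3 + 1*2^4, and the two non-zero entries
 * are separated by w-1 = 3 zeroes, as guaranteed above.
 */
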
/* Same as secp256k1_ecmult_wnaf, but stores to int8_t array. Requires w <= 8. */
static int secp256k1_ecmult_wnaf_small(int8_t *wnaf, int len, const secp256k1_scalar *a, int w) {
    int wnaf_tmp[256];
    int ret, i;

    VERIFY_CHECK(2 <= w && w <= 8);
    ret = secp256k1_ecmult_wnaf(wnaf_tmp, len, a, w);

    for (i = 0; i < len; i++) {
        wnaf[i] = (int8_t)wnaf_tmp[i];
    }

    return ret;
}

struct secp256k1_strauss_point_state {
    int8_t wnaf_na_1[129];
    int8_t wnaf_na_lam[129];
    int bits_na_1;
    int bits_na_lam;
};

struct secp256k1_strauss_state {
    /* aux is used to hold z-ratios, and then used to hold pre_a[i].x * BETA values. */
    secp256k1_fe* aux;
    secp256k1_ge* pre_a;
    struct secp256k1_strauss_point_state* ps;
};

static void secp256k1_ecmult_strauss_wnaf(const struct secp256k1_strauss_state *state, secp256k1_gej *r, size_t num, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
    secp256k1_ge tmpa;
    secp256k1_fe Z;
    /* Split G factors. */
    secp256k1_scalar ng_1, ng_128;
    int wnaf_ng_1[129];
    int bits_ng_1 = 0;
    int wnaf_ng_128[129];
    int bits_ng_128 = 0;
    int i;
    int bits = 0;
    size_t np;
    size_t no = 0;

    secp256k1_fe_set_int(&Z, 1);
    for (np = 0; np < num; ++np) {
        secp256k1_gej tmp;
        secp256k1_scalar na_1, na_lam;
        if (secp256k1_scalar_is_zero(&na[np]) || secp256k1_gej_is_infinity(&a[np])) {
            continue;
        }
        /* split na into na_1 and na_lam (where na = na_1 + na_lam*lambda, and na_1 and na_lam are ~128 bit) */
        secp256k1_scalar_split_lambda(&na_1, &na_lam, &na[np]);

        /* build wnaf representation for na_1 and na_lam. */
        state->ps[no].bits_na_1 = secp256k1_ecmult_wnaf_small(state->ps[no].wnaf_na_1, 129, &na_1, WINDOW_A);
        state->ps[no].bits_na_lam = secp256k1_ecmult_wnaf_small(state->ps[no].wnaf_na_lam, 129, &na_lam, WINDOW_A);
        VERIFY_CHECK(state->ps[no].bits_na_1 <= 129);
        VERIFY_CHECK(state->ps[no].bits_na_lam <= 129);
        if (state->ps[no].bits_na_1 > bits) {
            bits = state->ps[no].bits_na_1;
        }
        if (state->ps[no].bits_na_lam > bits) {
            bits = state->ps[no].bits_na_lam;
        }

        /* Calculate odd multiples of a.
         * All multiples are brought to the same Z 'denominator', which is stored
         * in Z. Due to secp256k1's isomorphism we can do all operations pretending
         * that the Z coordinate was 1, use affine addition formulae, and correct
         * the Z coordinate of the result once at the end.
         * The exception is the precomputed G table points, which are actually
         * affine. Compared to the base used for other points, they have a Z ratio
         * of 1/Z, so we can use secp256k1_gej_add_zinv_var, which uses the same
         * isomorphism to efficiently add with a known Z inverse.
         */
        tmp = a[np];
        if (no) {
            secp256k1_gej_rescale(&tmp, &Z);
        }
        secp256k1_ecmult_odd_multiples_table(ECMULT_TABLE_SIZE(WINDOW_A), state->pre_a + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &Z, &tmp);
        if (no) secp256k1_fe_mul(state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + no * ECMULT_TABLE_SIZE(WINDOW_A), &(a[np].z));

        ++no;
    }

    /* Bring them to the same Z denominator. */
    if (no) {
        secp256k1_ge_table_set_globalz(ECMULT_TABLE_SIZE(WINDOW_A) * no, state->pre_a, state->aux);
    }

    for (np = 0; np < no; ++np) {
        size_t j;
        for (j = 0; j < ECMULT_TABLE_SIZE(WINDOW_A); j++) {
            secp256k1_fe_mul(&state->aux[np * ECMULT_TABLE_SIZE(WINDOW_A) + j], &state->pre_a[np * ECMULT_TABLE_SIZE(WINDOW_A) + j].x, &secp256k1_const_beta);
        }
    }

    if (ng) {
        /* split ng into ng_1 and ng_128 (where ng = ng_1 + ng_128*2^128, and ng_1 and ng_128 are ~128 bit) */
        secp256k1_scalar_split_128(&ng_1, &ng_128, ng);

        /* Build wnaf representation for ng_1 and ng_128 */
        bits_ng_1 = secp256k1_ecmult_wnaf(wnaf_ng_1, 129, &ng_1, WINDOW_G);
        bits_ng_128 = secp256k1_ecmult_wnaf(wnaf_ng_128, 129, &ng_128, WINDOW_G);
        if (bits_ng_1 > bits) {
            bits = bits_ng_1;
        }
        if (bits_ng_128 > bits) {
            bits = bits_ng_128;
        }
    }

    secp256k1_gej_set_infinity(r);

    for (i = bits - 1; i >= 0; i--) {
        int n;
        secp256k1_gej_double_var(r, r, NULL);
        for (np = 0; np < no; ++np) {
            if (i < state->ps[np].bits_na_1 && (n = state->ps[np].wnaf_na_1[i])) {
                secp256k1_ecmult_table_get_ge(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
            if (i < state->ps[np].bits_na_lam && (n = state->ps[np].wnaf_na_lam[i])) {
                secp256k1_ecmult_table_get_ge_lambda(&tmpa, state->pre_a + np * ECMULT_TABLE_SIZE(WINDOW_A), state->aux + np * ECMULT_TABLE_SIZE(WINDOW_A), n, WINDOW_A);
                secp256k1_gej_add_ge_var(r, r, &tmpa, NULL);
            }
        }
        if (i < bits_ng_1 && (n = wnaf_ng_1[i])) {
            secp256k1_ecmult_table_get_ge_storage(&tmpa, secp256k1_pre_g, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
        if (i < bits_ng_128 && (n = wnaf_ng_128[i])) {
            secp256k1_ecmult_table_get_ge_storage(&tmpa, secp256k1_pre_g_128, n, WINDOW_G);
            secp256k1_gej_add_zinv_var(r, r, &tmpa, &Z);
        }
    }

    if (!secp256k1_gej_is_infinity(r)) {
        secp256k1_fe_mul(&r->z, &r->z, &Z);
    }
}

static void secp256k1_ecmult(secp256k1_gej *r, const secp256k1_gej *a, const secp256k1_scalar *na, const secp256k1_scalar *ng) {
    secp256k1_fe aux[ECMULT_TABLE_SIZE(WINDOW_A)];
    secp256k1_ge pre_a[ECMULT_TABLE_SIZE(WINDOW_A)];
    struct secp256k1_strauss_point_state ps[1];
    struct secp256k1_strauss_state state;

    state.aux = aux;
    state.pre_a = pre_a;
    state.ps = ps;
    secp256k1_ecmult_strauss_wnaf(&state, r, 1, a, na, ng);
}

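/* Usage sketch (hypothetical caller, not library code): compute
 * r = na*A + ng*G for an affine point A.
 *
 *     secp256k1_gej aj, r;
 *     secp256k1_gej_set_ge(&aj, &A);
 *     secp256k1_ecmult(&r, &aj, &na, &ng);
 *
 * Passing ng == NULL skips the G term entirely; if na is zero (or a is
 * infinity), only the precomputed G tables are consulted.
 */
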
static size_t secp256k1_strauss_scratch_size(size_t n_points) {
    static const size_t point_size = (sizeof(secp256k1_ge) + sizeof(secp256k1_fe)) * ECMULT_TABLE_SIZE(WINDOW_A) + sizeof(struct secp256k1_strauss_point_state) + sizeof(secp256k1_gej) + sizeof(secp256k1_scalar);
    return n_points*point_size;
}

static int secp256k1_ecmult_strauss_batch(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    secp256k1_gej* points;
    secp256k1_scalar* scalars;
    struct secp256k1_strauss_state state;
    size_t i;
    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }

    /* We allocate STRAUSS_SCRATCH_OBJECTS objects on the scratch space. If these
     * allocations change, make sure to update the STRAUSS_SCRATCH_OBJECTS
     * constant and strauss_scratch_size accordingly. */
    points = (secp256k1_gej*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_gej));
    scalars = (secp256k1_scalar*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(secp256k1_scalar));
    state.aux = (secp256k1_fe*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_fe));
    state.pre_a = (secp256k1_ge*)secp256k1_scratch_alloc(error_callback, scratch, n_points * ECMULT_TABLE_SIZE(WINDOW_A) * sizeof(secp256k1_ge));
    state.ps = (struct secp256k1_strauss_point_state*)secp256k1_scratch_alloc(error_callback, scratch, n_points * sizeof(struct secp256k1_strauss_point_state));

    if (points == NULL || scalars == NULL || state.aux == NULL || state.pre_a == NULL || state.ps == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    for (i = 0; i < n_points; i++) {
        secp256k1_ge point;
        if (!cb(&scalars[i], &point, i+cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        secp256k1_gej_set_ge(&points[i], &point);
    }
    secp256k1_ecmult_strauss_wnaf(&state, r, n_points, points, scalars, inp_g_sc);
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;
}

/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_strauss_batch_single(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return secp256k1_ecmult_strauss_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

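/* Illustrative callback (hypothetical, not part of the library): a
 * secp256k1_ecmult_multi_callback fills in the idx-th scalar/point pair,
 * typically from caller-owned arrays threaded through cbdata:
 *
 *     typedef struct { const secp256k1_scalar *scs; const secp256k1_ge *pts; } example_cb_data;
 *
 *     static int example_cb(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *data) {
 *         const example_cb_data *d = (const example_cb_data *)data;
 *         *sc = d->scs[idx];
 *         *pt = d->pts[idx];
 *         return 1;   (returning 0 aborts the batch)
 *     }
 *
 * Note that batches after the first are invoked with a nonzero cb_offset, so
 * idx already indexes into the full input, not the current batch.
 */
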
static size_t secp256k1_strauss_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
    return secp256k1_scratch_max_allocation(error_callback, scratch, STRAUSS_SCRATCH_OBJECTS) / secp256k1_strauss_scratch_size(1);
}

/** Convert a number to WNAF notation.
 *  The number becomes represented by sum(2^(w*i) * wnaf[i], i=0..WNAF_SIZE(w)-1) minus the returned skew.
 *  It has the following guarantees:
 *  - each wnaf[i] is either 0 or an odd integer between -(1 << w) and (1 << w)
 *  - the number of words set is always WNAF_SIZE(w)
 *  - the returned skew is 0 or 1
 */
static int secp256k1_wnaf_fixed(int *wnaf, const secp256k1_scalar *s, int w) {
    int skew = 0;
    int pos;
    int max_pos;
    int last_w;
    const secp256k1_scalar *work = s;

    if (secp256k1_scalar_is_zero(s)) {
        for (pos = 0; pos < WNAF_SIZE(w); pos++) {
            wnaf[pos] = 0;
        }
        return 0;
    }

    if (secp256k1_scalar_is_even(s)) {
        skew = 1;
    }

    wnaf[0] = secp256k1_scalar_get_bits_var(work, 0, w) + skew;
    /* Compute last window size. Relevant when window size doesn't divide the
     * number of bits in the scalar */
    last_w = WNAF_BITS - (WNAF_SIZE(w) - 1) * w;

    /* Store the position of the first nonzero word in max_pos to allow
     * skipping leading zeros when calculating the wnaf. */
    for (pos = WNAF_SIZE(w) - 1; pos > 0; pos--) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if (val != 0) {
            break;
        }
        wnaf[pos] = 0;
    }
    max_pos = pos;
    pos = 1;

    while (pos <= max_pos) {
        int val = secp256k1_scalar_get_bits_var(work, pos * w, pos == WNAF_SIZE(w)-1 ? last_w : w);
        if ((val & 1) == 0) {
            wnaf[pos - 1] -= (1 << w);
            wnaf[pos] = (val + 1);
        } else {
            wnaf[pos] = val;
        }
        /* Set a coefficient to zero if it is 1 or -1 and the preceding digit
         * is strictly negative or strictly positive respectively. Only change
         * coefficients at previous positions because above code assumes that
         * wnaf[pos - 1] is odd.
         */
        if (pos >= 2 && ((wnaf[pos - 1] == 1 && wnaf[pos - 2] < 0) || (wnaf[pos - 1] == -1 && wnaf[pos - 2] > 0))) {
            if (wnaf[pos - 1] == 1) {
                wnaf[pos - 2] += 1 << w;
            } else {
                wnaf[pos - 2] -= 1 << w;
            }
            wnaf[pos - 1] = 0;
        }
        ++pos;
    }

    return skew;
}

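/* Worked example (illustrative only): for s = 16 and w = 4, s is even, so
 * skew = 1 and wnaf[0] = 0 + 1 = 1; the next window holds 1, so wnaf[1] = 1.
 * The result is wnaf = {1, 1, 0, ...} with skew 1, and indeed
 * 1 + 1*2^4 - 1 = 16. Unlike secp256k1_ecmult_wnaf above, every window
 * position is assigned a digit (possibly zero), giving the fixed-length form
 * that pippenger_wnaf below relies on.
 */
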
struct secp256k1_pippenger_point_state {
    int skew_na;
    size_t input_pos;
};

struct secp256k1_pippenger_state {
    int *wnaf_na;
    struct secp256k1_pippenger_point_state* ps;
};

/*
 * pippenger_wnaf computes the result of a multi-point multiplication as
 * follows: The scalars are brought into wnaf with n_wnaf elements each. Then
 * for every i < n_wnaf, first each point is added to a "bucket" corresponding
 * to the point's wnaf[i]. Second, the buckets are added together such that
 * r += 1*bucket[0] + 3*bucket[1] + 5*bucket[2] + ...
 */
static int secp256k1_ecmult_pippenger_wnaf(secp256k1_gej *buckets, int bucket_window, struct secp256k1_pippenger_state *state, secp256k1_gej *r, const secp256k1_scalar *sc, const secp256k1_ge *pt, size_t num) {
    size_t n_wnaf = WNAF_SIZE(bucket_window+1);
    size_t np;
    size_t no = 0;
    int i;

    for (np = 0; np < num; ++np) {
        if (secp256k1_scalar_is_zero(&sc[np]) || secp256k1_ge_is_infinity(&pt[np])) {
            continue;
        }
        state->ps[no].input_pos = np;
        state->ps[no].skew_na = secp256k1_wnaf_fixed(&state->wnaf_na[no*n_wnaf], &sc[np], bucket_window+1);
        no++;
    }
    secp256k1_gej_set_infinity(r);

    if (no == 0) {
        return 1;
    }

    for (i = n_wnaf - 1; i >= 0; i--) {
        secp256k1_gej running_sum;
        int j;
        size_t buc;

        for (buc = 0; buc < ECMULT_TABLE_SIZE(bucket_window+2); buc++) {
            secp256k1_gej_set_infinity(&buckets[buc]);
        }

        for (np = 0; np < no; ++np) {
            int n = state->wnaf_na[np*n_wnaf + i];
            struct secp256k1_pippenger_point_state point_state = state->ps[np];
            secp256k1_ge tmp;

            if (i == 0) {
                /* correct for wnaf skew */
                int skew = point_state.skew_na;
                if (skew) {
                    secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                    secp256k1_gej_add_ge_var(&buckets[0], &buckets[0], &tmp, NULL);
                }
            }
            if (n > 0) {
                buc = (n - 1)/2;
                secp256k1_gej_add_ge_var(&buckets[buc], &buckets[buc], &pt[point_state.input_pos], NULL);
            } else if (n < 0) {
                buc = -(n + 1)/2;
                secp256k1_ge_neg(&tmp, &pt[point_state.input_pos]);
                secp256k1_gej_add_ge_var(&buckets[buc], &buckets[buc], &tmp, NULL);
            }
        }

        for (j = 0; j < bucket_window; j++) {
            secp256k1_gej_double_var(r, r, NULL);
        }

        secp256k1_gej_set_infinity(&running_sum);
        /* Accumulate the sum: bucket[0] + 3*bucket[1] + 5*bucket[2] + 7*bucket[3] + ...
         * = bucket[0] +   bucket[1] +   bucket[2] +   bucket[3] + ...
         *   +       2 * (bucket[1] + 2*bucket[2] + 3*bucket[3] + ...)
         * using an intermediate running sum:
         * running_sum = bucket[0] + bucket[1] + bucket[2] + ...
         *
         * The doubling is done implicitly by deferring the final window doubling (of 'r').
         */
        for (buc = ECMULT_TABLE_SIZE(bucket_window+2) - 1; buc > 0; buc--) {
            secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[buc], NULL);
            secp256k1_gej_add_var(r, r, &running_sum, NULL);
        }

        secp256k1_gej_add_var(&running_sum, &running_sum, &buckets[0], NULL);
        secp256k1_gej_double_var(r, r, NULL);
        secp256k1_gej_add_var(r, r, &running_sum, NULL);
    }
    return 1;
}

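/* Worked example of the bucket accumulation above (illustrative only, with r
 * starting at infinity for a single window): suppose three buckets b0, b1, b2
 * hold the points whose digits are 1, 3 and 5. The descending loop computes
 *
 *     buc = 2: running_sum = b2;       r = b2;
 *     buc = 1: running_sum = b2 + b1;  r = 2*b2 + b1;
 *
 * then running_sum becomes b2 + b1 + b0, r is doubled to 4*b2 + 2*b1, and the
 * final addition gives r = b0 + 3*b1 + 5*b2, exactly the weighted sum
 * 1*bucket[0] + 3*bucket[1] + 5*bucket[2].
 */
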
/**
 * Returns optimal bucket_window (number of bits of a scalar represented by a
 * set of buckets) for a given number of points.
 */
static int secp256k1_pippenger_bucket_window(size_t n) {
    if (n <= 1) {
        return 1;
    } else if (n <= 4) {
        return 2;
    } else if (n <= 20) {
        return 3;
    } else if (n <= 57) {
        return 4;
    } else if (n <= 136) {
        return 5;
    } else if (n <= 235) {
        return 6;
    } else if (n <= 1260) {
        return 7;
    } else if (n <= 4420) {
        return 9;
    } else if (n <= 7880) {
        return 10;
    } else if (n <= 16050) {
        return 11;
    } else {
        return PIPPENGER_MAX_BUCKET_WINDOW;
    }
}

/**
 * Returns the maximum optimal number of points for a bucket_window.
 */
static size_t secp256k1_pippenger_bucket_window_inv(int bucket_window) {
    switch(bucket_window) {
        case 1: return 1;
        case 2: return 4;
        case 3: return 20;
        case 4: return 57;
        case 5: return 136;
        case 6: return 235;
        case 7: return 1260;
        case 8: return 1260;
        case 9: return 4420;
        case 10: return 7880;
        case 11: return 16050;
        case PIPPENGER_MAX_BUCKET_WINDOW: return SIZE_MAX;
    }
    return 0;
}

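/* The two functions above are consistent in the sense that for any point
 * count n,
 *     n <= secp256k1_pippenger_bucket_window_inv(secp256k1_pippenger_bucket_window(n)).
 * For example, secp256k1_pippenger_bucket_window(100) == 5 and
 * secp256k1_pippenger_bucket_window_inv(5) == 136 >= 100.
 */
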
SECP256K1_INLINE static void secp256k1_ecmult_endo_split(secp256k1_scalar *s1, secp256k1_scalar *s2, secp256k1_ge *p1, secp256k1_ge *p2) {
    secp256k1_scalar tmp = *s1;
    secp256k1_scalar_split_lambda(s1, s2, &tmp);
    secp256k1_ge_mul_lambda(p2, p1);

    if (secp256k1_scalar_is_high(s1)) {
        secp256k1_scalar_negate(s1, s1);
        secp256k1_ge_neg(p1, p1);
    }
    if (secp256k1_scalar_is_high(s2)) {
        secp256k1_scalar_negate(s2, s2);
        secp256k1_ge_neg(p2, p2);
    }
}

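/* After secp256k1_ecmult_endo_split returns, s1*p1 + s2*p2 == s*p holds for
 * the original inputs s (= *s1) and p (= *p1): split_lambda gives
 * s = s1 + lambda*s2 (mod n), mul_lambda gives p2 = lambda*p, and each
 * conditional negation flips a scalar together with its point, preserving the
 * product while keeping both scalars low (~128 bits).
 */
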
/**
 * Returns the scratch size required for a given number of points (excluding
 * base point G) without considering alignment.
 */
static size_t secp256k1_pippenger_scratch_size(size_t n_points, int bucket_window) {
    size_t entries = 2*n_points + 2;
    size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);
    return (sizeof(secp256k1_gej) << bucket_window) + sizeof(struct secp256k1_pippenger_state) + entries * entry_size;
}

static int secp256k1_ecmult_pippenger_batch(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points, size_t cb_offset) {
    const size_t scratch_checkpoint = secp256k1_scratch_checkpoint(error_callback, scratch);
    /* Use 2*(n_points + 1) entries when calculating batch sizes: with the
     * endomorphism every point splits into two, and the +1 accounts for the
     * G scalar, which is added to the list of other scalars. */
    size_t entries = 2*n_points + 2;
    secp256k1_ge *points;
    secp256k1_scalar *scalars;
    secp256k1_gej *buckets;
    struct secp256k1_pippenger_state *state_space;
    size_t idx = 0;
    size_t point_idx = 0;
    int bucket_window;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n_points == 0) {
        return 1;
    }
    bucket_window = secp256k1_pippenger_bucket_window(n_points);

    /* We allocate PIPPENGER_SCRATCH_OBJECTS objects on the scratch space. If
     * these allocations change, make sure to update the
     * PIPPENGER_SCRATCH_OBJECTS constant and pippenger_scratch_size
     * accordingly.
     */
    points = (secp256k1_ge *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*points));
    scalars = (secp256k1_scalar *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*scalars));
    state_space = (struct secp256k1_pippenger_state *) secp256k1_scratch_alloc(error_callback, scratch, sizeof(*state_space));
    if (points == NULL || scalars == NULL || state_space == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }
    state_space->ps = (struct secp256k1_pippenger_point_state *) secp256k1_scratch_alloc(error_callback, scratch, entries * sizeof(*state_space->ps));
    state_space->wnaf_na = (int *) secp256k1_scratch_alloc(error_callback, scratch, entries*(WNAF_SIZE(bucket_window+1)) * sizeof(int));
    buckets = (secp256k1_gej *) secp256k1_scratch_alloc(error_callback, scratch, ((size_t)1 << bucket_window) * sizeof(*buckets));
    if (state_space->ps == NULL || state_space->wnaf_na == NULL || buckets == NULL) {
        secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
        return 0;
    }

    if (inp_g_sc != NULL) {
        scalars[0] = *inp_g_sc;
        points[0] = secp256k1_ge_const_g;
        idx++;
        secp256k1_ecmult_endo_split(&scalars[0], &scalars[1], &points[0], &points[1]);
        idx++;
    }

    while (point_idx < n_points) {
        if (!cb(&scalars[idx], &points[idx], point_idx + cb_offset, cbdata)) {
            secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
            return 0;
        }
        idx++;
        secp256k1_ecmult_endo_split(&scalars[idx - 1], &scalars[idx], &points[idx - 1], &points[idx]);
        idx++;
        point_idx++;
    }

    secp256k1_ecmult_pippenger_wnaf(buckets, bucket_window, state_space, r, scalars, points, idx);
    secp256k1_scratch_apply_checkpoint(error_callback, scratch, scratch_checkpoint);
    return 1;
}

/* Wrapper for secp256k1_ecmult_multi_func interface */
static int secp256k1_ecmult_pippenger_batch_single(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    return secp256k1_ecmult_pippenger_batch(error_callback, scratch, r, inp_g_sc, cb, cbdata, n, 0);
}

/**
 * Returns the maximum number of points in addition to G that can be used with
 * a given scratch space. Any smaller number of points may also be used with
 * the same scratch space.
 */
static size_t secp256k1_pippenger_max_points(const secp256k1_callback* error_callback, secp256k1_scratch *scratch) {
    size_t max_alloc = secp256k1_scratch_max_allocation(error_callback, scratch, PIPPENGER_SCRATCH_OBJECTS);
    int bucket_window;
    size_t res = 0;

    for (bucket_window = 1; bucket_window <= PIPPENGER_MAX_BUCKET_WINDOW; bucket_window++) {
        size_t n_points;
        size_t max_points = secp256k1_pippenger_bucket_window_inv(bucket_window);
        size_t space_for_points;
        size_t space_overhead;
        size_t entry_size = sizeof(secp256k1_ge) + sizeof(secp256k1_scalar) + sizeof(struct secp256k1_pippenger_point_state) + (WNAF_SIZE(bucket_window+1)+1)*sizeof(int);

        entry_size = 2*entry_size;
        space_overhead = (sizeof(secp256k1_gej) << bucket_window) + entry_size + sizeof(struct secp256k1_pippenger_state);
        if (space_overhead > max_alloc) {
            break;
        }
        space_for_points = max_alloc - space_overhead;

        n_points = space_for_points/entry_size;
        n_points = n_points > max_points ? max_points : n_points;
        if (n_points > res) {
            res = n_points;
        }
        if (n_points < max_points) {
            /* A larger bucket_window may support even more points. But if we
             * would choose that then the caller couldn't safely use any number
             * smaller than what this function returns */
            break;
        }
    }
    return res;
}

/* Computes ecmult_multi by simply multiplying and adding each point. Does not
 * require a scratch space */
static int secp256k1_ecmult_multi_simple_var(secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n_points) {
    size_t point_idx;
    secp256k1_gej tmpj;

    secp256k1_gej_set_infinity(r);
    secp256k1_gej_set_infinity(&tmpj);
    /* r = inp_g_sc*G */
    secp256k1_ecmult(r, &tmpj, &secp256k1_scalar_zero, inp_g_sc);
    for (point_idx = 0; point_idx < n_points; point_idx++) {
        secp256k1_ge point;
        secp256k1_gej pointj;
        secp256k1_scalar scalar;
        if (!cb(&scalar, &point, point_idx, cbdata)) {
            return 0;
        }
        /* r += scalar*point */
        secp256k1_gej_set_ge(&pointj, &point);
        secp256k1_ecmult(&tmpj, &pointj, &scalar, NULL);
        secp256k1_gej_add_var(r, r, &tmpj, NULL);
    }
    return 1;
}

/* Compute the number of batches and the batch size given the maximum batch size and the
 * total number of points */
static int secp256k1_ecmult_multi_batch_size_helper(size_t *n_batches, size_t *n_batch_points, size_t max_n_batch_points, size_t n) {
    if (max_n_batch_points == 0) {
        return 0;
    }
    if (max_n_batch_points > ECMULT_MAX_POINTS_PER_BATCH) {
        max_n_batch_points = ECMULT_MAX_POINTS_PER_BATCH;
    }
    if (n == 0) {
        *n_batches = 0;
        *n_batch_points = 0;
        return 1;
    }
    /* Compute ceil(n/max_n_batch_points) and ceil(n/n_batches) */
    *n_batches = CEIL_DIV(n, max_n_batch_points);
    *n_batch_points = CEIL_DIV(n, *n_batches);
    return 1;
}

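/* Example (illustrative only): for n = 250 points and max_n_batch_points =
 * 100, the helper above computes n_batches = ceil(250/100) = 3 and
 * n_batch_points = ceil(250/3) = 84, i.e. batches of 84, 84 and 82 points
 * rather than the more lopsided 100, 100 and 50.
 */
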
typedef int (*secp256k1_ecmult_multi_func)(const secp256k1_callback* error_callback, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t);
static int secp256k1_ecmult_multi_var(const secp256k1_callback* error_callback, secp256k1_scratch *scratch, secp256k1_gej *r, const secp256k1_scalar *inp_g_sc, secp256k1_ecmult_multi_callback cb, void *cbdata, size_t n) {
    size_t i;

    int (*f)(const secp256k1_callback* error_callback, secp256k1_scratch*, secp256k1_gej*, const secp256k1_scalar*, secp256k1_ecmult_multi_callback cb, void*, size_t, size_t);
    size_t n_batches;
    size_t n_batch_points;

    secp256k1_gej_set_infinity(r);
    if (inp_g_sc == NULL && n == 0) {
        return 1;
    } else if (n == 0) {
        secp256k1_ecmult(r, r, &secp256k1_scalar_zero, inp_g_sc);
        return 1;
    }
    if (scratch == NULL) {
        return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
    }

    /* Compute the batch sizes for Pippenger's algorithm given a scratch space. If the batch size
     * is at least a threshold, use Pippenger's algorithm; otherwise use Strauss' algorithm.
     * As a first step check if there's enough space for Pippenger's algo (which requires less
     * space than Strauss' algo) and if not, use the simple algorithm. */
    if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_pippenger_max_points(error_callback, scratch), n)) {
        return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
    }
    if (n_batch_points >= ECMULT_PIPPENGER_THRESHOLD) {
        f = secp256k1_ecmult_pippenger_batch;
    } else {
        if (!secp256k1_ecmult_multi_batch_size_helper(&n_batches, &n_batch_points, secp256k1_strauss_max_points(error_callback, scratch), n)) {
            return secp256k1_ecmult_multi_simple_var(r, inp_g_sc, cb, cbdata, n);
        }
        f = secp256k1_ecmult_strauss_batch;
    }
    for (i = 0; i < n_batches; i++) {
        size_t nbp = n < n_batch_points ? n : n_batch_points;
        size_t offset = n_batch_points*i;
        secp256k1_gej tmp;
        if (!f(error_callback, scratch, &tmp, i == 0 ? inp_g_sc : NULL, cb, cbdata, nbp, offset)) {
            return 0;
        }
        secp256k1_gej_add_var(r, r, &tmp, NULL);
        n -= nbp;
    }
    return 1;
}

#endif /* SECP256K1_ECMULT_IMPL_H */