tests_exhaustive.c
/***********************************************************************
 * Copyright (c) 2016 Andrew Poelstra                                  *
 * Distributed under the MIT software license, see the accompanying    *
 * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
 ***********************************************************************/

/* Exhaustive tests: instantiate the library over a tiny group (order
 * EXHAUSTIVE_TEST_ORDER) and check every group/scalar operation against
 * every possible input combination. */

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#ifndef EXHAUSTIVE_TEST_ORDER
/* see group_impl.h for allowable values */
#define EXHAUSTIVE_TEST_ORDER 13
#endif

/* These values of B are all values in [1, 8] that result in a curve with even order. */
#define EXHAUSTIVE_TEST_CURVE_HAS_EVEN_ORDER (SECP256K1_B == 1 || SECP256K1_B == 6 || SECP256K1_B == 8)

#ifdef USE_EXTERNAL_DEFAULT_CALLBACKS
    #pragma message("Ignoring USE_EXTERNAL_CALLBACKS in exhaustive_tests.")
    #undef USE_EXTERNAL_DEFAULT_CALLBACKS
#endif
/* Include the implementation directly so internal (static) functions and
 * context internals are visible to the tests. */
#include "secp256k1.c"

#include "../include/secp256k1.h"
#include "assumptions.h"
#include "group.h"
#include "testrand_impl.h"
#include "ecmult_compute_table_impl.h"
#include "ecmult_gen_compute_table_impl.h"
#include "testutil.h"
#include "util.h"

/* Number of full test iterations; overridable via argv[1]. */
static int count = 2;

/* Work-splitting parameters for running the exhaustive tests across multiple
 * processes; set from argv[3]/argv[4]. */
static uint32_t num_cores = 1;
static uint32_t this_core = 0;

/* Pseudorandomly decide whether this core should skip the current section of
 * work. *iter is a per-call-site counter stepped by an odd 64-bit constant;
 * the mix of its halves is mapped onto [0, num_cores) and compared against
 * this_core. Returns nonzero if the section should be skipped. */
SECP256K1_INLINE static int skip_section(uint64_t* iter) {
    if (num_cores == 1) return 0;
    *iter += 0xe7037ed1a0b428dbULL;
    return ((((uint32_t)*iter ^ (*iter >> 32)) * num_cores) >> 32) != this_core;
}

/* Nonce function for signing over the small group: returns the integer
 * pointed to by `data` as the nonce, so the caller can exhaustively sweep
 * all nonce values. Matches the secp256k1_nonce_function signature. */
static int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
                                             const unsigned char *key32, const unsigned char *algo16,
                                             void *data, unsigned int attempt) {
    secp256k1_scalar s;
    int *idata = data;
    (void)msg32;
    (void)key32;
    (void)algo16;
    /* Some nonces cannot be used because they'd cause s and/or r to be zero.
     * The signing function has retry logic here that just re-calls the nonce
     * function with an increased `attempt`. So if attempt > 0 this means we
     * need to change the nonce to avoid an infinite loop. */
    if (attempt > 0) {
        *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER;
    }
    secp256k1_scalar_set_int(&s, *idata);
    secp256k1_scalar_get_b32(nonce32, &s);
    return 1;
}

/* Check that multiplying each group element by lambda lands on the element
 * whose index is (i * EXHAUSTIVE_TEST_LAMBDA) mod order, i.e. the
 * endomorphism acts as scalar multiplication by lambda. */
static void test_exhaustive_endomorphism(const secp256k1_ge *group) {
    int i;
    for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
        secp256k1_ge res;
        secp256k1_ge_mul_lambda(&res, &group[i]);
        CHECK(secp256k1_ge_eq_var(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res));
    }
}

/* Exhaustively check every addition formula (add_var, add_ge, add_ge_var,
 * add_zinv_var), doubling, and negation against the known group table:
 * group[i] + group[j] must equal group[(i + j) mod order]. */
static void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj) {
    int i, j;
    uint64_t iter = 0;

    /* Sanity-check (and check infinity functions) */
    CHECK(secp256k1_ge_is_infinity(&group[0]));
    CHECK(secp256k1_gej_is_infinity(&groupj[0]));
    for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
        CHECK(!secp256k1_ge_is_infinity(&group[i]));
        CHECK(!secp256k1_gej_is_infinity(&groupj[i]));
    }

    /* Check all addition formulae */
    for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
        secp256k1_fe fe_inv;
        if (skip_section(&iter)) continue;
        secp256k1_fe_inv(&fe_inv, &groupj[j].z);
        for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
            secp256k1_ge zless_gej;
            secp256k1_gej tmp;
            /* add_var */
            secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
            CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER]));
            /* add_ge (only defined for non-infinity affine operand, hence j > 0) */
            if (j > 0) {
                secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
                CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER]));
            }
            /* add_ge_var */
            secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
            CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER]));
            /* add_zinv_var: feed the Jacobian coordinates of groupj[j] as an
             * "affine" point together with the inverse of its z, which is the
             * representation that function expects. */
            if (secp256k1_gej_is_infinity(&groupj[j])) {
                secp256k1_ge_set_infinity(&zless_gej);
            } else {
                secp256k1_ge_set_xy(&zless_gej, &groupj[j].x, &groupj[j].y);
            }
            secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
            CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i + j) % EXHAUSTIVE_TEST_ORDER]));
        }
    }

    /* Check doubling */
    for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
        secp256k1_gej tmp;
        secp256k1_gej_double(&tmp, &groupj[i]);
        CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(2 * i) % EXHAUSTIVE_TEST_ORDER]));
        secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
        CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(2 * i) % EXHAUSTIVE_TEST_ORDER]));
    }

    /* Check negation */
    for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
        secp256k1_ge tmp;
        secp256k1_gej tmpj;
        secp256k1_ge_neg(&tmp, &group[i]);
        CHECK(secp256k1_ge_eq_var(&tmp, &group[EXHAUSTIVE_TEST_ORDER - i]));
        secp256k1_gej_neg(&tmpj, &groupj[i]);
        CHECK(secp256k1_gej_eq_ge_var(&tmpj, &group[EXHAUSTIVE_TEST_ORDER - i]));
    }
}

/* Exhaustively check secp256k1_ecmult (na*P + ng*G for every na, ng, P),
 * secp256k1_ecmult_const, and secp256k1_ecmult_const_xonly against the
 * index arithmetic of the known group table. */
static void test_exhaustive_ecmult(const secp256k1_ge *group, const secp256k1_gej *groupj) {
    int i, j, r_log;
    uint64_t iter = 0;
    for (r_log = 1; r_log < EXHAUSTIVE_TEST_ORDER; r_log++) {
        for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
            if (skip_section(&iter)) continue;
            for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
                secp256k1_gej tmp;
                secp256k1_scalar na, ng;
                secp256k1_scalar_set_int(&na, i);
                secp256k1_scalar_set_int(&ng, j);

                /* i * groupj[r_log] + j * G == group[(i * r_log + j) mod order] */
                secp256k1_ecmult(&tmp, &groupj[r_log], &na, &ng);
                CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i * r_log + j) % EXHAUSTIVE_TEST_ORDER]));
            }
        }
    }

    for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
        for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
            int ret;
            secp256k1_gej tmp;
            secp256k1_fe xn, xd, tmpf;
            secp256k1_scalar ng;

            if (skip_section(&iter)) continue;

            secp256k1_scalar_set_int(&ng, j);

            /* Test secp256k1_ecmult_const. */
            secp256k1_ecmult_const(&tmp, &group[i], &ng);
            CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i * j) % EXHAUSTIVE_TEST_ORDER]));

            if (i != 0 && j != 0) {
                /* Test secp256k1_ecmult_const_xonly with all curve X coordinates, and xd=NULL. */
                ret = secp256k1_ecmult_const_xonly(&tmpf, &group[i].x, NULL, &ng, 0);
                CHECK(ret);
                CHECK(secp256k1_fe_equal(&tmpf, &group[(i * j) % EXHAUSTIVE_TEST_ORDER].x));

                /* Test secp256k1_ecmult_const_xonly with all curve X coordinates, with random xd.
                 * The x coordinate is passed as the fraction xn/xd. */
                testutil_random_fe_non_zero(&xd);
                secp256k1_fe_mul(&xn, &xd, &group[i].x);
                ret = secp256k1_ecmult_const_xonly(&tmpf, &xn, &xd, &ng, 0);
                CHECK(ret);
                CHECK(secp256k1_fe_equal(&tmpf, &group[(i * j) % EXHAUSTIVE_TEST_ORDER].x));
            }
        }
    }
}

/* Callback data for test_exhaustive_ecmult_multi: two scalar/point pairs. */
typedef struct {
    secp256k1_scalar sc[2];
    secp256k1_ge pt[2];
} ecmult_multi_data;

/* secp256k1_ecmult_multi_var callback: hand out the idx-th scalar/point pair
 * from the ecmult_multi_data passed through cbdata. Always succeeds. */
static int ecmult_multi_callback(secp256k1_scalar *sc, secp256k1_ge *pt, size_t idx, void *cbdata) {
    ecmult_multi_data *data = (ecmult_multi_data*) cbdata;
    *sc = data->sc[idx];
    *pt = data->pt[idx];
    return 1;
}

/* Exhaustively check secp256k1_ecmult_multi_var over every combination of
 * two scalars (i, j), a G-scalar (k), and two points (group[x], group[y]):
 * the result must be group[(i*x + j*y + k) mod order]. */
static void test_exhaustive_ecmult_multi(const secp256k1_context *ctx, const secp256k1_ge *group) {
    int i, j, k, x, y;
    uint64_t iter = 0;
    secp256k1_scratch *scratch = secp256k1_scratch_create(&ctx->error_callback, 4096);
    for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++) {
        for (j = 0; j < EXHAUSTIVE_TEST_ORDER; j++) {
            for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
                for (x = 0; x < EXHAUSTIVE_TEST_ORDER; x++) {
                    if (skip_section(&iter)) continue;
                    for (y = 0; y < EXHAUSTIVE_TEST_ORDER; y++) {
                        secp256k1_gej tmp;
                        secp256k1_scalar g_sc;
                        ecmult_multi_data data;

                        secp256k1_scalar_set_int(&data.sc[0], i);
                        secp256k1_scalar_set_int(&data.sc[1], j);
                        secp256k1_scalar_set_int(&g_sc, k);
                        data.pt[0] = group[x];
                        data.pt[1] = group[y];

                        secp256k1_ecmult_multi_var(&ctx->error_callback, scratch, &tmp, &g_sc, ecmult_multi_callback, &data, 2);
                        CHECK(secp256k1_gej_eq_ge_var(&tmp, &group[(i * x + j * y + k) % EXHAUSTIVE_TEST_ORDER]));
                    }
                }
            }
        }
    }
    secp256k1_scratch_destroy(&ctx->error_callback, scratch);
}

/* Compute the ECDSA r value corresponding to nonce k: the x coordinate of
 * group[k mod order], coerced to a scalar. *overflow (may be NULL) reports
 * whether the coercion wrapped around the group order. */
static void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k, int* overflow) {
    secp256k1_fe x;
    unsigned char x_bin[32];
    k %= EXHAUSTIVE_TEST_ORDER;
    x = group[k].x;
    secp256k1_fe_normalize(&x);
    secp256k1_fe_get_b32(x_bin, &x);
    secp256k1_scalar_set_b32(r, x_bin, overflow);
}

/* Exhaustively check secp256k1_ecdsa_verify: for every (s, r, msg, key)
 * tuple, derive by hand whether any nonce k makes the signature valid, and
 * require verify() to agree.
 * NOTE(review): direct `==` on scalars and indexing group[] by a scalar rely
 * on the exhaustive-test build representing secp256k1_scalar as a small
 * integer type — confirm against the test scalar implementation. */
static void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group) {
    int s, r, msg, key;
    uint64_t iter = 0;
    for (s = 1; s < EXHAUSTIVE_TEST_ORDER; s++) {
        for (r = 1; r < EXHAUSTIVE_TEST_ORDER; r++) {
            for (msg = 1; msg < EXHAUSTIVE_TEST_ORDER; msg++) {
                for (key = 1; key < EXHAUSTIVE_TEST_ORDER; key++) {
                    secp256k1_ge nonconst_ge;
                    secp256k1_ecdsa_signature sig;
                    secp256k1_pubkey pk;
                    secp256k1_scalar sk_s, msg_s, r_s, s_s;
                    secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
                    int k, should_verify;
                    unsigned char msg32[32];

                    if (skip_section(&iter)) continue;

                    secp256k1_scalar_set_int(&s_s, s);
                    secp256k1_scalar_set_int(&r_s, r);
                    secp256k1_scalar_set_int(&msg_s, msg);
                    secp256k1_scalar_set_int(&sk_s, key);

                    /* Verify by hand */
                    /* Run through every k value that gives us this r and check that *one* works.
                     * Note there could be none, there could be multiple, ECDSA is weird. */
                    should_verify = 0;
                    for (k = 0; k < EXHAUSTIVE_TEST_ORDER; k++) {
                        secp256k1_scalar check_x_s;
                        r_from_k(&check_x_s, group, k, NULL);
                        if (r_s == check_x_s) {
                            /* The signature equation: s*k == msg + r*sk (mod order). */
                            secp256k1_scalar_set_int(&s_times_k_s, k);
                            secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
                            secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
                            secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
                            should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
                        }
                    }
                    /* nb we have a "high s" rule */
                    should_verify &= !secp256k1_scalar_is_high(&s_s);

                    /* Verify by calling verify */
                    secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s);
                    memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
                    secp256k1_pubkey_save(&pk, &nonconst_ge);
                    secp256k1_scalar_get_b32(msg32, &msg_s);
                    CHECK(should_verify ==
                          secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
                }
            }
        }
    }
}

/* Exhaustively check secp256k1_ecdsa_sign: for every (message, key, nonce)
 * combination, sign and confirm the produced (r, s) satisfies the ECDSA
 * equation (up to the low-s normalization of s). */
static void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group) {
    int i, j, k;
    uint64_t iter = 0;

    /* Loop */
    for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) { /* message */
        for (j = 1; j < EXHAUSTIVE_TEST_ORDER; j++) { /* key */
            if (skip_section(&iter)) continue;
            for (k = 1; k < EXHAUSTIVE_TEST_ORDER; k++) { /* nonce */
                const int starting_k = k;
                int ret;
                secp256k1_ecdsa_signature sig;
                secp256k1_scalar sk, msg, r, s, expected_r;
                unsigned char sk32[32], msg32[32];
                secp256k1_scalar_set_int(&msg, i);
                secp256k1_scalar_set_int(&sk, j);
                secp256k1_scalar_get_b32(sk32, &sk);
                secp256k1_scalar_get_b32(msg32, &msg);

                ret = secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
                CHECK(ret == 1);

                secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
                /* Note that we compute expected_r *after* signing -- this is important
                 * because our nonce-computing function function might change k during
                 * signing. */
                r_from_k(&expected_r, group, k, NULL);
                CHECK(r == expected_r);
                /* Either s or -s must satisfy k*s == msg + r*sk (low-s rule). */
                CHECK((k * s) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER ||
                      (k * (EXHAUSTIVE_TEST_ORDER - s)) % EXHAUSTIVE_TEST_ORDER == (i + r * j) % EXHAUSTIVE_TEST_ORDER);

                /* Overflow means we've tried every possible nonce */
                if (k < starting_k) {
                    break;
                }
            }
        }
    }

    /* We would like to verify zero-knowledge here by counting how often every
     * possible (s, r) tuple appears, but because the group order is larger
     * than the field order, when coercing the x-values to scalar values, some
     * appear more often than others, so we are actually not zero-knowledge.
     * (This effect also appears in the real code, but the difference is on the
     * order of 1/2^128th the field order, so the deviation is not useful to a
     * computationally bounded attacker.)
     */
}

#ifdef ENABLE_MODULE_RECOVERY
#include "modules/recovery/tests_exhaustive_impl.h"
#endif

#ifdef ENABLE_MODULE_EXTRAKEYS
#include "modules/extrakeys/tests_exhaustive_impl.h"
#endif

#ifdef ENABLE_MODULE_SCHNORRSIG
#include "modules/schnorrsig/tests_exhaustive_impl.h"
#endif

#ifdef ENABLE_MODULE_ELLSWIFT
#include "modules/ellswift/tests_exhaustive_impl.h"
#endif

/* Entry point. Usage: tests_exhaustive [count] [seed] [numcores] [thiscore].
 * Builds the full group table, cross-checks it against ecmult_gen, then runs
 * every exhaustive test `count` times with freshly randomized contexts. */
int main(int argc, char** argv) {
    int i;
    secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
    secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
    unsigned char rand32[32];
    secp256k1_context *ctx;

    /* Disable buffering for stdout to improve reliability of getting
     * diagnostic information. Happens right at the start of main because
     * setbuf must be used before any other operation on the stream. */
    setbuf(stdout, NULL);
    /* Also disable buffering for stderr because it's not guaranteed that it's
     * unbuffered on all systems. */
    setbuf(stderr, NULL);

    printf("Exhaustive tests for order %lu\n", (unsigned long)EXHAUSTIVE_TEST_ORDER);

    /* find iteration count */
    if (argc > 1) {
        count = strtol(argv[1], NULL, 0);
    }
    printf("test count = %i\n", count);

    /* find random seed */
    testrand_init(argc > 2 ? argv[2] : NULL);

    /* set up split processing */
    if (argc > 4) {
        num_cores = strtol(argv[3], NULL, 0);
        this_core = strtol(argv[4], NULL, 0);
        if (num_cores < 1 || this_core >= num_cores) {
            fprintf(stderr, "Usage: %s [count] [seed] [numcores] [thiscore]\n", argv[0]);
            return EXIT_FAILURE;
        }
        printf("running tests for core %lu (out of [0..%lu])\n", (unsigned long)this_core, (unsigned long)num_cores - 1);
    }

    /* Recreate the ecmult{,_gen} tables using the right generator (as selected via EXHAUSTIVE_TEST_ORDER) */
    secp256k1_ecmult_gen_compute_table(&secp256k1_ecmult_gen_prec_table[0][0], &secp256k1_ge_const_g, COMB_BLOCKS, COMB_TEETH, COMB_SPACING);
    secp256k1_ecmult_compute_two_tables(secp256k1_pre_g, secp256k1_pre_g_128, WINDOW_G, &secp256k1_ge_const_g);

    while (count--) {
        /* Build context */
        ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
        testrand256(rand32);
        CHECK(secp256k1_context_randomize(ctx, rand32));

        /* Generate the entire group: group[i] = i*G, in both affine and
         * Jacobian form. */
        secp256k1_gej_set_infinity(&groupj[0]);
        secp256k1_ge_set_gej(&group[0], &groupj[0]);
        for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
            secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
            secp256k1_ge_set_gej(&group[i], &groupj[i]);
            if (count != 0) {
                /* Set a different random z-value for each Jacobian point, except z=1
                   is used in the last iteration. */
                secp256k1_fe z;
                testutil_random_fe(&z);
                secp256k1_gej_rescale(&groupj[i], &z);
            }

            /* Verify against ecmult_gen */
            {
                secp256k1_scalar scalar_i;
                secp256k1_gej generatedj;
                secp256k1_ge generated;

                secp256k1_scalar_set_int(&scalar_i, i);
                secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
                secp256k1_ge_set_gej(&generated, &generatedj);

                CHECK(!secp256k1_ge_is_infinity(&group[i]));
                CHECK(secp256k1_ge_eq_var(&group[i], &generated));
            }
        }

        /* Run the tests */
        test_exhaustive_endomorphism(group);
        test_exhaustive_addition(group, groupj);
        test_exhaustive_ecmult(group, groupj);
        test_exhaustive_ecmult_multi(ctx, group);
        test_exhaustive_sign(ctx, group);
        test_exhaustive_verify(ctx, group);

#ifdef ENABLE_MODULE_RECOVERY
        test_exhaustive_recovery(ctx, group);
#endif
#ifdef ENABLE_MODULE_EXTRAKEYS
        test_exhaustive_extrakeys(ctx, group);
#endif
#ifdef ENABLE_MODULE_SCHNORRSIG
        test_exhaustive_schnorrsig(ctx);
#endif
#ifdef ENABLE_MODULE_ELLSWIFT
        /* The ellswift algorithm does have additional edge cases when operating on
         * curves of even order, which are not included in the code as secp256k1 is
         * of odd order. Skip the ellswift tests if the used exhaustive tests curve
         * is even-ordered accordingly. */
#if !EXHAUSTIVE_TEST_CURVE_HAS_EVEN_ORDER
        test_exhaustive_ellswift(ctx, group);
#endif
#endif

        secp256k1_context_destroy(ctx);
    }

    printf("no problems found\n");
    return EXIT_SUCCESS;
}