/* src/secp256k1/src/field_5x52_impl.h */
  1  /***********************************************************************
  2   * Copyright (c) 2013, 2014 Pieter Wuille                              *
  3   * Distributed under the MIT software license, see the accompanying    *
  4   * file COPYING or https://www.opensource.org/licenses/mit-license.php.*
  5   ***********************************************************************/
  6  
  7  #ifndef SECP256K1_FIELD_REPR_IMPL_H
  8  #define SECP256K1_FIELD_REPR_IMPL_H
  9  
 10  #include "checkmem.h"
 11  #include "util.h"
 12  #include "field.h"
 13  #include "modinv64_impl.h"
 14  
 15  #include "field_5x52_int128_impl.h"
 16  
#ifdef VERIFY
/* Check the 5x52 representation invariants of 'a': each of the four low limbs
 * holds at most 52 bits and the top limb at most 48 bits, scaled by the
 * element's magnitude (an element of magnitude m may have limbs up to 2*m
 * times the per-limb cap; normalized elements are bounded as magnitude 1). */
static void secp256k1_fe_impl_verify(const secp256k1_fe *a) {
    const uint64_t *d = a->n;
    int m = a->normalized ? 1 : 2 * a->magnitude;
    /* secp256k1 'p' value defined in "Standards for Efficient Cryptography" (SEC2) 2.7.1. */
    VERIFY_CHECK(d[0] <= 0xFFFFFFFFFFFFFULL * m);
    VERIFY_CHECK(d[1] <= 0xFFFFFFFFFFFFFULL * m);
    VERIFY_CHECK(d[2] <= 0xFFFFFFFFFFFFFULL * m);
    VERIFY_CHECK(d[3] <= 0xFFFFFFFFFFFFFULL * m);
    VERIFY_CHECK(d[4] <= 0x0FFFFFFFFFFFFULL * m);
    if (a->normalized) {
        /* A normalized value must additionally be strictly below
         * p = 2^256 - 0x1000003D1; only the bottom limb can still differ once
         * limbs 1..4 are all at their maximum. */
        if ((d[4] == 0x0FFFFFFFFFFFFULL) && ((d[3] & d[2] & d[1]) == 0xFFFFFFFFFFFFFULL)) {
            VERIFY_CHECK(d[0] < 0xFFFFEFFFFFC2FULL);
        }
    }
}
#endif
 34  
 35  static void secp256k1_fe_impl_get_bounds(secp256k1_fe *r, int m) {
 36      r->n[0] = 0xFFFFFFFFFFFFFULL * 2 * m;
 37      r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * m;
 38      r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * m;
 39      r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * m;
 40      r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * m;
 41  }
 42  
/* Fully normalize r in place: produce the canonical representative in [0, p)
 * with magnitude 1, where p = 2^256 - 0x1000003D1. Constant time: the final
 * conditional reduction is always computed and selected via the 0/1 value x. */
static void secp256k1_fe_impl_normalize(secp256k1_fe *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    uint64_t m;
    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;

    /* The first pass ensures the magnitude is 1, ... */
    /* Folding the excess x above bit 256 into t0 uses 2^256 == 0x1000003D1 (mod p).
     * m accumulates the AND of limbs t1..t3 for the all-ones test below. */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    /* At most a single final reduction is needed; check if the value is >= the field characteristic */
    x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
        & (t0 >= 0xFFFFEFFFFFC2FULL));

    /* Apply the final reduction (for constant-time behaviour, we do it always) */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;

    /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
    VERIFY_CHECK(t4 >> 48 == x);

    /* Mask off the possible multiple of 2^256 from the final reduction */
    t4 &= 0x0FFFFFFFFFFFFULL;

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
}
 79  
/* Weakly normalize r in place: a single carry-propagation pass that brings the
 * magnitude down to 1. Unlike secp256k1_fe_impl_normalize, the result is not
 * guaranteed to be the canonical representative below p. */
static void secp256k1_fe_impl_normalize_weak(secp256k1_fe *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;

    /* The first pass ensures the magnitude is 1, ... */
    /* (2^256 == 0x1000003D1 mod p, so the excess x folds into t0.) */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
}
 98  
/* Fully normalize r in place, variable time: same result as
 * secp256k1_fe_impl_normalize, but the final subtraction of p is only
 * performed when the value is actually >= p. */
static void secp256k1_fe_impl_normalize_var(secp256k1_fe *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    uint64_t m;
    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;

    /* The first pass ensures the magnitude is 1, ... */
    /* m accumulates the AND of limbs t1..t3 for the all-ones test below. */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; m = t1;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; m &= t2;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; m &= t3;

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    /* At most a single final reduction is needed; check if the value is >= the field characteristic */
    x = (t4 >> 48) | ((t4 == 0x0FFFFFFFFFFFFULL) & (m == 0xFFFFFFFFFFFFFULL)
        & (t0 >= 0xFFFFEFFFFFC2FULL));

    if (x) {
        t0 += 0x1000003D1ULL;
        t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL;
        t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL;
        t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL;
        t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL;

        /* If t4 didn't carry to bit 48 already, then it should have after any final reduction */
        VERIFY_CHECK(t4 >> 48 == x);

        /* Mask off the possible multiple of 2^256 from the final reduction */
        t4 &= 0x0FFFFFFFFFFFFULL;
    }

    r->n[0] = t0; r->n[1] = t1; r->n[2] = t2; r->n[3] = t3; r->n[4] = t4;
}
136  
/* Return whether r would normalize to zero, in constant time: after one weak
 * reduction pass the raw value must be either 0 or exactly p. */
static int secp256k1_fe_impl_normalizes_to_zero(const secp256k1_fe *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    uint64_t z0, z1;

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    uint64_t x = t4 >> 48; t4 &= 0x0FFFFFFFFFFFFULL;

    /* The first pass ensures the magnitude is 1, ... */
    /* z0 ORs all limbs (0 iff the value is 0). z1 ANDs limbs after XORing
     * t0 with 0x1000003D0 and t4 with 0xF000000000000, which turn p's bottom
     * limb 0xFFFFEFFFFFC2F and top limb 0x0FFFFFFFFFFFF into all-ones, so z1
     * is all-ones iff the value equals p exactly. */
    t0 += x * 0x1000003D1ULL;
    t1 += (t0 >> 52); t0 &= 0xFFFFFFFFFFFFFULL; z0  = t0; z1  = t0 ^ 0x1000003D0ULL;
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
                                                z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}
159  
/* Variable-time version of secp256k1_fe_impl_normalizes_to_zero: checks the
 * bottom limb first and returns early when it already rules out both 0 and p,
 * which is the common case. */
static int secp256k1_fe_impl_normalizes_to_zero_var(const secp256k1_fe *r) {
    uint64_t t0, t1, t2, t3, t4;
    uint64_t z0, z1;
    uint64_t x;

    t0 = r->n[0];
    t4 = r->n[4];

    /* Reduce t4 at the start so there will be at most a single carry from the first pass */
    x = t4 >> 48;

    /* The first pass ensures the magnitude is 1, ... */
    t0 += x * 0x1000003D1ULL;

    /* z0 tracks a possible raw value of 0, z1 tracks a possible raw value of P */
    /* XORing with 0x1000003D0 turns p's bottom limb 0xFFFFEFFFFFC2F into
     * all-ones, the pattern z1 must keep through the AND chain below. */
    z0 = t0 & 0xFFFFFFFFFFFFFULL;
    z1 = z0 ^ 0x1000003D0ULL;

    /* Fast return path should catch the majority of cases */
    if ((z0 != 0ULL) & (z1 != 0xFFFFFFFFFFFFFULL)) {
        return 0;
    }

    t1 = r->n[1];
    t2 = r->n[2];
    t3 = r->n[3];

    t4 &= 0x0FFFFFFFFFFFFULL;

    t1 += (t0 >> 52);
    t2 += (t1 >> 52); t1 &= 0xFFFFFFFFFFFFFULL; z0 |= t1; z1 &= t1;
    t3 += (t2 >> 52); t2 &= 0xFFFFFFFFFFFFFULL; z0 |= t2; z1 &= t2;
    t4 += (t3 >> 52); t3 &= 0xFFFFFFFFFFFFFULL; z0 |= t3; z1 &= t3;
                                                z0 |= t4; z1 &= t4 ^ 0xF000000000000ULL;

    /* ... except for a possible carry at bit 48 of t4 (i.e. bit 256 of the field element) */
    VERIFY_CHECK(t4 >> 49 == 0);

    return (z0 == 0) | (z1 == 0xFFFFFFFFFFFFFULL);
}
200  
201  SECP256K1_INLINE static void secp256k1_fe_impl_set_int(secp256k1_fe *r, int a) {
202      r->n[0] = a;
203      r->n[1] = r->n[2] = r->n[3] = r->n[4] = 0;
204  }
205  
206  SECP256K1_INLINE static int secp256k1_fe_impl_is_zero(const secp256k1_fe *a) {
207      const uint64_t *t = a->n;
208      return (t[0] | t[1] | t[2] | t[3] | t[4]) == 0;
209  }
210  
211  SECP256K1_INLINE static int secp256k1_fe_impl_is_odd(const secp256k1_fe *a) {
212      return a->n[0] & 1;
213  }
214  
215  static int secp256k1_fe_impl_cmp_var(const secp256k1_fe *a, const secp256k1_fe *b) {
216      int i;
217      for (i = 4; i >= 0; i--) {
218          if (a->n[i] > b->n[i]) {
219              return 1;
220          }
221          if (a->n[i] < b->n[i]) {
222              return -1;
223          }
224      }
225      return 0;
226  }
227  
228  static void secp256k1_fe_impl_set_b32_mod(secp256k1_fe *r, const unsigned char *a) {
229      r->n[0] = (uint64_t)a[31]
230              | ((uint64_t)a[30] << 8)
231              | ((uint64_t)a[29] << 16)
232              | ((uint64_t)a[28] << 24)
233              | ((uint64_t)a[27] << 32)
234              | ((uint64_t)a[26] << 40)
235              | ((uint64_t)(a[25] & 0xF)  << 48);
236      r->n[1] = (uint64_t)((a[25] >> 4) & 0xF)
237              | ((uint64_t)a[24] << 4)
238              | ((uint64_t)a[23] << 12)
239              | ((uint64_t)a[22] << 20)
240              | ((uint64_t)a[21] << 28)
241              | ((uint64_t)a[20] << 36)
242              | ((uint64_t)a[19] << 44);
243      r->n[2] = (uint64_t)a[18]
244              | ((uint64_t)a[17] << 8)
245              | ((uint64_t)a[16] << 16)
246              | ((uint64_t)a[15] << 24)
247              | ((uint64_t)a[14] << 32)
248              | ((uint64_t)a[13] << 40)
249              | ((uint64_t)(a[12] & 0xF) << 48);
250      r->n[3] = (uint64_t)((a[12] >> 4) & 0xF)
251              | ((uint64_t)a[11] << 4)
252              | ((uint64_t)a[10] << 12)
253              | ((uint64_t)a[9]  << 20)
254              | ((uint64_t)a[8]  << 28)
255              | ((uint64_t)a[7]  << 36)
256              | ((uint64_t)a[6]  << 44);
257      r->n[4] = (uint64_t)a[5]
258              | ((uint64_t)a[4] << 8)
259              | ((uint64_t)a[3] << 16)
260              | ((uint64_t)a[2] << 24)
261              | ((uint64_t)a[1] << 32)
262              | ((uint64_t)a[0] << 40);
263  }
264  
265  static int secp256k1_fe_impl_set_b32_limit(secp256k1_fe *r, const unsigned char *a) {
266      secp256k1_fe_impl_set_b32_mod(r, a);
267      return !((r->n[4] == 0x0FFFFFFFFFFFFULL) & ((r->n[3] & r->n[2] & r->n[1]) == 0xFFFFFFFFFFFFFULL) & (r->n[0] >= 0xFFFFEFFFFFC2FULL));
268  }
269  
270  /** Convert a field element to a 32-byte big endian value. Requires the input to be normalized */
271  static void secp256k1_fe_impl_get_b32(unsigned char *r, const secp256k1_fe *a) {
272      r[0] = (a->n[4] >> 40) & 0xFF;
273      r[1] = (a->n[4] >> 32) & 0xFF;
274      r[2] = (a->n[4] >> 24) & 0xFF;
275      r[3] = (a->n[4] >> 16) & 0xFF;
276      r[4] = (a->n[4] >> 8) & 0xFF;
277      r[5] = a->n[4] & 0xFF;
278      r[6] = (a->n[3] >> 44) & 0xFF;
279      r[7] = (a->n[3] >> 36) & 0xFF;
280      r[8] = (a->n[3] >> 28) & 0xFF;
281      r[9] = (a->n[3] >> 20) & 0xFF;
282      r[10] = (a->n[3] >> 12) & 0xFF;
283      r[11] = (a->n[3] >> 4) & 0xFF;
284      r[12] = ((a->n[2] >> 48) & 0xF) | ((a->n[3] & 0xF) << 4);
285      r[13] = (a->n[2] >> 40) & 0xFF;
286      r[14] = (a->n[2] >> 32) & 0xFF;
287      r[15] = (a->n[2] >> 24) & 0xFF;
288      r[16] = (a->n[2] >> 16) & 0xFF;
289      r[17] = (a->n[2] >> 8) & 0xFF;
290      r[18] = a->n[2] & 0xFF;
291      r[19] = (a->n[1] >> 44) & 0xFF;
292      r[20] = (a->n[1] >> 36) & 0xFF;
293      r[21] = (a->n[1] >> 28) & 0xFF;
294      r[22] = (a->n[1] >> 20) & 0xFF;
295      r[23] = (a->n[1] >> 12) & 0xFF;
296      r[24] = (a->n[1] >> 4) & 0xFF;
297      r[25] = ((a->n[0] >> 48) & 0xF) | ((a->n[1] & 0xF) << 4);
298      r[26] = (a->n[0] >> 40) & 0xFF;
299      r[27] = (a->n[0] >> 32) & 0xFF;
300      r[28] = (a->n[0] >> 24) & 0xFF;
301      r[29] = (a->n[0] >> 16) & 0xFF;
302      r[30] = (a->n[0] >> 8) & 0xFF;
303      r[31] = a->n[0] & 0xFF;
304  }
305  
306  SECP256K1_INLINE static void secp256k1_fe_impl_negate_unchecked(secp256k1_fe *r, const secp256k1_fe *a, int m) {
307      /* For all legal values of m (0..31), the following properties hold: */
308      VERIFY_CHECK(0xFFFFEFFFFFC2FULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m);
309      VERIFY_CHECK(0xFFFFFFFFFFFFFULL * 2 * (m + 1) >= 0xFFFFFFFFFFFFFULL * 2 * m);
310      VERIFY_CHECK(0x0FFFFFFFFFFFFULL * 2 * (m + 1) >= 0x0FFFFFFFFFFFFULL * 2 * m);
311  
312      /* Due to the properties above, the left hand in the subtractions below is never less than
313       * the right hand. */
314      r->n[0] = 0xFFFFEFFFFFC2FULL * 2 * (m + 1) - a->n[0];
315      r->n[1] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[1];
316      r->n[2] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[2];
317      r->n[3] = 0xFFFFFFFFFFFFFULL * 2 * (m + 1) - a->n[3];
318      r->n[4] = 0x0FFFFFFFFFFFFULL * 2 * (m + 1) - a->n[4];
319  }
320  
321  SECP256K1_INLINE static void secp256k1_fe_impl_mul_int_unchecked(secp256k1_fe *r, int a) {
322      r->n[0] *= a;
323      r->n[1] *= a;
324      r->n[2] *= a;
325      r->n[3] *= a;
326      r->n[4] *= a;
327  }
328  
329  SECP256K1_INLINE static void secp256k1_fe_impl_add_int(secp256k1_fe *r, int a) {
330      r->n[0] += a;
331  }
332  
333  SECP256K1_INLINE static void secp256k1_fe_impl_add(secp256k1_fe *r, const secp256k1_fe *a) {
334      r->n[0] += a->n[0];
335      r->n[1] += a->n[1];
336      r->n[2] += a->n[2];
337      r->n[3] += a->n[3];
338      r->n[4] += a->n[4];
339  }
340  
/* Field multiplication r = a * b, delegated to the 5x52 int128 multiply
 * kernel (field_5x52_int128_impl.h). b must not alias r or a
 * (SECP256K1_RESTRICT); r may alias a. */
SECP256K1_INLINE static void secp256k1_fe_impl_mul(secp256k1_fe *r, const secp256k1_fe *a, const secp256k1_fe * SECP256K1_RESTRICT b) {
    secp256k1_fe_mul_inner(r->n, a->n, b->n);
}
344  
/* Field squaring r = a^2, delegated to the 5x52 int128 squaring kernel
 * (field_5x52_int128_impl.h). r may alias a. */
SECP256K1_INLINE static void secp256k1_fe_impl_sqr(secp256k1_fe *r, const secp256k1_fe *a) {
    secp256k1_fe_sqr_inner(r->n, a->n);
}
348  
/* Constant-time conditional move: r = flag ? *a : *r, implemented with
 * bit masks rather than a branch. */
SECP256K1_INLINE static void secp256k1_fe_impl_cmov(secp256k1_fe *r, const secp256k1_fe *a, int flag) {
    uint64_t mask0, mask1;
    volatile int vflag = flag; /* volatile read discourages the compiler from reintroducing a branch */
    VERIFY_CHECK(flag == 0 || flag == 1);
    SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
    mask0 = vflag + ~((uint64_t)0); /* flag==0 -> all ones (keep r); flag==1 -> 0 */
    mask1 = ~mask0;                 /* complementary mask selecting a */
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
    r->n[4] = (r->n[4] & mask0) | (a->n[4] & mask1);
}
362  
/* Halve a field element in place: r = r/2 (mod p). If the value is odd, p is
 * first added (selected by a mask, so constant time) to make it even, then
 * the whole 5-limb value is shifted right by one bit. */
static SECP256K1_INLINE void secp256k1_fe_impl_half(secp256k1_fe *r) {
    uint64_t t0 = r->n[0], t1 = r->n[1], t2 = r->n[2], t3 = r->n[3], t4 = r->n[4];
    uint64_t one = (uint64_t)1;
    uint64_t mask = -(t0 & one) >> 12; /* low 52 bits all-ones iff t0 is odd, else 0 */

    /* Bounds analysis (over the rationals).
     *
     * Let m = r->magnitude
     *     C = 0xFFFFFFFFFFFFFULL * 2
     *     D = 0x0FFFFFFFFFFFFULL * 2
     *
     * Initial bounds: t0..t3 <= C * m
     *                     t4 <= D * m
     */

    /* Add p (limbs 0xFFFFEFFFFFC2F, 0xFFFFFFFFFFFFF x3, 0x0FFFFFFFFFFFF) when odd. */
    t0 += 0xFFFFEFFFFFC2FULL & mask;
    t1 += mask;
    t2 += mask;
    t3 += mask;
    t4 += mask >> 4;

    VERIFY_CHECK((t0 & one) == 0);

    /* t0..t3: added <= C/2
     *     t4: added <= D/2
     *
     * Current bounds: t0..t3 <= C * (m + 1/2)
     *                     t4 <= D * (m + 1/2)
     */

    r->n[0] = (t0 >> 1) + ((t1 & one) << 51);
    r->n[1] = (t1 >> 1) + ((t2 & one) << 51);
    r->n[2] = (t2 >> 1) + ((t3 & one) << 51);
    r->n[3] = (t3 >> 1) + ((t4 & one) << 51);
    r->n[4] = (t4 >> 1);

    /* t0..t3: shifted right and added <= C/4 + 1/2
     *     t4: shifted right
     *
     * Current bounds: t0..t3 <= C * (m/2 + 1/2)
     *                     t4 <= D * (m/2 + 1/4)
     *
     * Therefore the output magnitude (M) has to be set such that:
     *     t0..t3: C * M >= C * (m/2 + 1/2)
     *         t4: D * M >= D * (m/2 + 1/4)
     *
     * It suffices for all limbs that, for any input magnitude m:
     *     M >= m/2 + 1/2
     *
     * and since we want the smallest such integer value for M:
     *     M == floor(m/2) + 1
     */
}
416  
/* Constant-time conditional move for the 4x64 storage form:
 * r = flag ? *a : *r, implemented with bit masks rather than a branch. */
static SECP256K1_INLINE void secp256k1_fe_storage_cmov(secp256k1_fe_storage *r, const secp256k1_fe_storage *a, int flag) {
    uint64_t mask0, mask1;
    volatile int vflag = flag; /* volatile read discourages the compiler from reintroducing a branch */
    VERIFY_CHECK(flag == 0 || flag == 1);
    SECP256K1_CHECKMEM_CHECK_VERIFY(r->n, sizeof(r->n));
    mask0 = vflag + ~((uint64_t)0); /* flag==0 -> all ones (keep r); flag==1 -> 0 */
    mask1 = ~mask0;
    r->n[0] = (r->n[0] & mask0) | (a->n[0] & mask1);
    r->n[1] = (r->n[1] & mask0) | (a->n[1] & mask1);
    r->n[2] = (r->n[2] & mask0) | (a->n[2] & mask1);
    r->n[3] = (r->n[3] & mask0) | (a->n[3] & mask1);
}
429  
430  static void secp256k1_fe_impl_to_storage(secp256k1_fe_storage *r, const secp256k1_fe *a) {
431      r->n[0] = a->n[0] | a->n[1] << 52;
432      r->n[1] = a->n[1] >> 12 | a->n[2] << 40;
433      r->n[2] = a->n[2] >> 24 | a->n[3] << 28;
434      r->n[3] = a->n[3] >> 36 | a->n[4] << 16;
435  }
436  
437  static SECP256K1_INLINE void secp256k1_fe_impl_from_storage(secp256k1_fe *r, const secp256k1_fe_storage *a) {
438      r->n[0] = a->n[0] & 0xFFFFFFFFFFFFFULL;
439      r->n[1] = a->n[0] >> 52 | ((a->n[1] << 12) & 0xFFFFFFFFFFFFFULL);
440      r->n[2] = a->n[1] >> 40 | ((a->n[2] << 24) & 0xFFFFFFFFFFFFFULL);
441      r->n[3] = a->n[2] >> 28 | ((a->n[3] << 36) & 0xFFFFFFFFFFFFFULL);
442      r->n[4] = a->n[3] >> 16;
443  }
444  
445  static void secp256k1_fe_from_signed62(secp256k1_fe *r, const secp256k1_modinv64_signed62 *a) {
446      const uint64_t M52 = UINT64_MAX >> 12;
447      const uint64_t a0 = a->v[0], a1 = a->v[1], a2 = a->v[2], a3 = a->v[3], a4 = a->v[4];
448  
449      /* The output from secp256k1_modinv64{_var} should be normalized to range [0,modulus), and
450       * have limbs in [0,2^62). The modulus is < 2^256, so the top limb must be below 2^(256-62*4).
451       */
452      VERIFY_CHECK(a0 >> 62 == 0);
453      VERIFY_CHECK(a1 >> 62 == 0);
454      VERIFY_CHECK(a2 >> 62 == 0);
455      VERIFY_CHECK(a3 >> 62 == 0);
456      VERIFY_CHECK(a4 >> 8 == 0);
457  
458      r->n[0] =  a0                   & M52;
459      r->n[1] = (a0 >> 52 | a1 << 10) & M52;
460      r->n[2] = (a1 >> 42 | a2 << 20) & M52;
461      r->n[3] = (a2 >> 32 | a3 << 30) & M52;
462      r->n[4] = (a3 >> 22 | a4 << 40);
463  }
464  
465  static void secp256k1_fe_to_signed62(secp256k1_modinv64_signed62 *r, const secp256k1_fe *a) {
466      const uint64_t M62 = UINT64_MAX >> 2;
467      const uint64_t a0 = a->n[0], a1 = a->n[1], a2 = a->n[2], a3 = a->n[3], a4 = a->n[4];
468  
469      r->v[0] = (a0       | a1 << 52) & M62;
470      r->v[1] = (a1 >> 10 | a2 << 42) & M62;
471      r->v[2] = (a2 >> 20 | a3 << 32) & M62;
472      r->v[3] = (a3 >> 30 | a4 << 22) & M62;
473      r->v[4] =  a4 >> 40;
474  }
475  
/* Modulus descriptor for the modinv64 code: the field prime in signed62
 * notation. Limbs are 62-bit digits, so {-0x1000003D1, 0, 0, 0, 256} encodes
 * -0x1000003D1 + 256 * 2^(62*4) = 2^256 - 0x1000003D1 = p. The second field
 * is the modinv64 'modulus inverse mod 2^62' constant — see modinv64_impl.h
 * for its exact contract. */
static const secp256k1_modinv64_modinfo secp256k1_const_modinfo_fe = {
    {{-0x1000003D1LL, 0, 0, 0, 256}},
    0x27C7F6E22DDACACFLL
};
480  
/* Compute the modular inverse of x, writing it to r. A copy of x is fully
 * normalized first so the signed62 conversion sees a reduced value; the
 * inversion itself is delegated to secp256k1_modinv64 (the non-variable-time
 * routine; the variable-time path lives in secp256k1_fe_impl_inv_var). */
static void secp256k1_fe_impl_inv(secp256k1_fe *r, const secp256k1_fe *x) {
    secp256k1_fe tmp = *x;
    secp256k1_modinv64_signed62 s;

    secp256k1_fe_normalize(&tmp);
    secp256k1_fe_to_signed62(&s, &tmp);
    secp256k1_modinv64(&s, &secp256k1_const_modinfo_fe);
    secp256k1_fe_from_signed62(r, &s);
}
490  
/* Variable-time modular inverse of x, written to r: same structure as
 * secp256k1_fe_impl_inv but uses the variable-time normalize and
 * secp256k1_modinv64_var. */
static void secp256k1_fe_impl_inv_var(secp256k1_fe *r, const secp256k1_fe *x) {
    secp256k1_fe tmp = *x;
    secp256k1_modinv64_signed62 s;

    secp256k1_fe_normalize_var(&tmp);
    secp256k1_fe_to_signed62(&s, &tmp);
    secp256k1_modinv64_var(&s, &secp256k1_const_modinfo_fe);
    secp256k1_fe_from_signed62(r, &s);
}
500  
/* Determine (in variable time) whether x is a square in the field, via the
 * Jacobi symbol, with a square-root computation as fallback when the Jacobi
 * routine gives up. Zero is reported as a square. Returns 1 if square, else 0. */
static int secp256k1_fe_impl_is_square_var(const secp256k1_fe *x) {
    secp256k1_fe tmp;
    secp256k1_modinv64_signed62 s;
    int jac, ret;

    tmp = *x;
    secp256k1_fe_normalize_var(&tmp);
    /* secp256k1_jacobi64_maybe_var cannot deal with input 0. */
    if (secp256k1_fe_is_zero(&tmp)) return 1;
    secp256k1_fe_to_signed62(&s, &tmp);
    jac = secp256k1_jacobi64_maybe_var(&s, &secp256k1_const_modinfo_fe);
    if (jac == 0) {
        /* secp256k1_jacobi64_maybe_var failed to compute the Jacobi symbol. Fall back
         * to computing a square root. This should be extremely rare with random
         * input (except in VERIFY mode, where a lower iteration count is used). */
        secp256k1_fe dummy;
        ret = secp256k1_fe_sqrt(&dummy, &tmp);
    } else {
        /* jac is +1 for squares and -1 for non-squares. */
        ret = jac >= 0;
    }
    return ret;
}
523  
524  #endif /* SECP256K1_FIELD_REPR_IMPL_H */