/* src/mpmult.c */
  1  #include <sodium.h>
  2  #include <stdint.h>
  3  #include <string.h>
  4  #include "toprf.h"
  5  #include "dkg-vss.h"
  6  #include "dkg.h"
  7  #ifdef UNIT_TEST
  8  #include "utils.h"
  9  #endif
 10  
 11  /** Implements the Simple-Mult algorithm from page 5 fig. 2 of
 12      "Simplified VSS and Fast-track Multiparty Computations with
 13      Applications to Threshold Cryptography" by Gennaro, Rabin, Rabin,
 14      1998.
 15   */
 16  
 17  static int cmp(const uint8_t a[crypto_core_ristretto255_SCALARBYTES], const uint8_t b[crypto_core_ristretto255_SCALARBYTES]) {
 18    // non-const time! but its ok, this operates on the vandermonde matrix, no secrets involved
 19    for(int i=crypto_core_ristretto255_SCALARBYTES-1;i>=0;i--) {
 20      if(a[i]>b[i]) return 1;
 21      if(a[i]<b[i]) return -1;
 22    }
 23    return 0;
 24  }
 25  
 26  static void r255div(uint8_t r[crypto_core_ristretto255_SCALARBYTES],
 27           const uint8_t a[crypto_core_ristretto255_SCALARBYTES],
 28           const uint8_t b[crypto_core_ristretto255_SCALARBYTES]) {
 29        uint8_t b_inv[crypto_core_ristretto255_SCALARBYTES];
 30        crypto_core_ristretto255_scalar_invert(b_inv, b);
 31        crypto_core_ristretto255_scalar_mul(r, a, b_inv);
 32  }
 33  
/** Gaussian elimination (forward phase) of the n x n scalar matrix `a`,
    in place, with row pivoting.

    On return, `a` holds the reduced rows with the elimination multipliers
    stored below the diagonal, and `index` records the row permutation:
    logical row i lives in a[index[i]].

    NOTE(review): the pivot is chosen by byte-wise magnitude scaled by the
    row maximum — a numerical-analysis technique; in a finite field it can
    only serve to pick a deterministic (hopefully non-zero) pivot, as
    magnitude has no numerical meaning here — confirm intent. */
static void gaussian(const uint8_t n, uint8_t a[n][n][crypto_core_ristretto255_SCALARBYTES], uint8_t index[n]) {
  // c[i]: the "largest" (per cmp()) entry of row i, used to scale pivot candidates
  uint8_t c[n][crypto_core_ristretto255_SCALARBYTES];
  memset(c,0,sizeof c);

  // start with the identity permutation
  for(uint8_t i=0;i<n;i++) {
    index[i]=i;
  }

  // compute the per-row scaling factors c[i]
  for(uint8_t i=0; i<n; i++) {
    uint8_t c1[crypto_core_ristretto255_SCALARBYTES]={0};
    for(uint8_t j=0; j<n; j++) {
      if(cmp(a[i][j],c1)>0) {// a[i][j] > c1
        memcpy(c1,&a[i][j],crypto_core_ristretto255_SCALARBYTES);
      }
    }
    memcpy(&c[i],c1,crypto_core_ristretto255_SCALARBYTES);
  }

  uint8_t k=0;
  for(uint8_t j=0;j<n - 1;j++) {
    // select pivot row k among rows j..n-1: the one maximizing
    // a[index[i]][j] / c[index[i]]
    uint8_t pi1[crypto_core_ristretto255_SCALARBYTES]={0};
    for(uint8_t i=j;i<n;i++) {
      uint8_t pi0[crypto_core_ristretto255_SCALARBYTES];

      // pi0 = a[index[i]][j] / c[index[i]]
      r255div(pi0, a[index[i]][j], c[index[i]]);
      // pi0 > pi1?
      if(cmp(pi0,pi1)>0) {// pi0 > pi1
        memcpy(pi1,pi0,crypto_core_ristretto255_SCALARBYTES);
        k=i;
      }
    }

    // swap index[j] and index[k]
    uint8_t prev_index_j=index[j];
    index[j] = index[k];
    index[k] = prev_index_j;

    // eliminate column j from all rows below the pivot row
    for(uint8_t i=j+1; i<n; i++) {
      // pj = a[index[i]][j] / a[index[j]][j]
      uint8_t pj[crypto_core_ristretto255_SCALARBYTES];
      r255div(pj, a[index[i]][j], a[index[j]][j]);

      // keep the multiplier in the eliminated slot; invert() reads it later
      memcpy(&a[index[i]][j], pj, crypto_core_ristretto255_SCALARBYTES);

      for(uint8_t l=j+1; l<n; l++) {
        uint8_t tmp[crypto_core_ristretto255_SCALARBYTES];
        // a[index[i]][l] -= pj * a[index[j]][l]
        crypto_core_ristretto255_scalar_mul(tmp, pj, a[index[j]][l]);
        crypto_core_ristretto255_scalar_sub(a[index[i]][l], a[index[i]][l], tmp);
      }
    }
  }
}
 88  
/** Inverts the n x n ristretto255-scalar matrix `a` into `x`.

    `a` is destroyed: gaussian() reduces it in place and leaves the
    elimination multipliers below the diagonal.  `b` starts as the identity
    matrix, has the same forward elimination applied to it, and then each
    column of the inverse is recovered by back substitution.  `x` is
    written in logical (unpermuted) row order. */
static void invert(const uint8_t n,
                   uint8_t a[n][n][crypto_core_ristretto255_SCALARBYTES],
                   uint8_t x[n][n][crypto_core_ristretto255_SCALARBYTES]) {
  uint8_t b[n][n][crypto_core_ristretto255_SCALARBYTES];
  memset(b,0,sizeof b);

  // b = identity matrix (scalar 1 is little-endian {1,0,...,0})
  for(int i=0;i<n;i++) {
    b[i][i][0]=1;
  }
  uint8_t index[n];

  // forward elimination of a; index[] receives the row permutation
  gaussian(n, a, index);

  // apply the same elimination steps to b, using the stored multipliers
  for(uint8_t i=0; i < n-1; i++) {
    for(uint8_t j= i+1 ; j<n; j++) {
      for(uint8_t k=0; k<n; k++) {
        uint8_t tmp[crypto_core_ristretto255_SCALARBYTES];
        // b[index[j]][k] -= a[index[j]][i] * b[index[i]][k]
        crypto_core_ristretto255_scalar_mul(tmp, a[index[j]][i], b[index[i]][k]);
        crypto_core_ristretto255_scalar_sub(b[index[j]][k], b[index[j]][k], tmp);
      }
    }
  }

  // back substitution, one column i of the inverse at a time
  for(uint8_t i=0; i<n; i++) {
    uint8_t tmp[crypto_core_ristretto255_SCALARBYTES];
    // x[n-1][i] = b[index[n-1]][i] / a[index[n-1]][n-1]
    crypto_core_ristretto255_scalar_invert(tmp, a[index[n-1]][n-1]);
    crypto_core_ristretto255_scalar_mul(x[n-1][i], b[index[n-1]][i], tmp);
    for(int j = n-2; j>=0; j--) {
      memcpy(&x[j][i], &b[index[j]][i], crypto_core_ristretto255_SCALARBYTES);
      for(int k = j+1; k<n; k++) {
        // x[j][i] -= a[index[j]][k] * x[k][i]
        crypto_core_ristretto255_scalar_mul(tmp, a[index[j]][k],x[k][i]);
        crypto_core_ristretto255_scalar_sub(x[j][i],x[j][i], tmp);
      }
      // x[j][i] /= a[index[j]][j]
      crypto_core_ristretto255_scalar_invert(tmp, a[index[j]][j]);
      crypto_core_ristretto255_scalar_mul(x[j][i], x[j][i], tmp);
    }
  }
}
131  
132  static void genVDMmatrix(const uint8_t indexes[], const uint8_t index_len,
133                           uint8_t matrix[index_len][index_len][crypto_core_ristretto255_SCALARBYTES]) {
134    memset(matrix,0,index_len*index_len*((unsigned) crypto_core_ristretto255_SCALARBYTES));
135    for(uint8_t i=0;i<index_len;i++) {
136      uint8_t base[crypto_core_ristretto255_SCALARBYTES]={0};
137      base[0]=indexes[i];
138      for(uint8_t j=0;j<index_len;j++) {
139        matrix[i][j][0]=1;
140        for(uint8_t k=0;k<j;k++) {
141          crypto_core_ristretto255_scalar_mul(matrix[i][j], matrix[i][j], base);
142        }
143      }
144    }
145  }
146  
147  void __attribute__((visibility("hidden"))) invertedVDMmatrix(const uint8_t dealers,
148                                                               const uint8_t indexes[dealers],
149                                                               uint8_t inverted[dealers][dealers][crypto_core_ristretto255_SCALARBYTES]) {
150    uint8_t vdm[dealers][dealers][crypto_core_ristretto255_SCALARBYTES];
151    genVDMmatrix(indexes, dealers, vdm);
152    invert(dealers, vdm, inverted);
153  }
154  
155  int toprf_mpc_mul_start(const uint8_t _a[TOPRF_Share_BYTES],
156                          const uint8_t _b[TOPRF_Share_BYTES],
157                          const uint8_t peers, const uint8_t threshold,
158                          uint8_t shares[peers][TOPRF_Share_BYTES]) {
159    const TOPRF_Share *a=(TOPRF_Share*) _a;
160    const TOPRF_Share *b=(TOPRF_Share*) _b;
161  
162    if(a->index!=b->index ||
163       peers<threshold ||
164       threshold == 0) return 1;
165  
166    uint8_t ab[crypto_core_ristretto255_SCALARBYTES];
167    crypto_core_ristretto255_scalar_mul(ab, a->value, b->value);
168    //dump(ab, sizeof ab, "ab");
169    toprf_create_shares(ab, peers, threshold, shares);
170    //for(unsigned j=0;j<peers;j++)
171      //dump(shares[j], TOPRF_Share_BYTES, "mulshare");
172    return 0;
173  }
174  
175  void toprf_mpc_mul_finish(const uint8_t dealers,
176                            const uint8_t indexes[dealers],
177                            const uint8_t peer,
178                            const uint8_t shares[dealers][TOPRF_Share_BYTES],
179                            uint8_t _share[TOPRF_Share_BYTES]) {
180    TOPRF_Share *share=(TOPRF_Share*) _share;
181  
182    // pre-calculate inverted vandermonde matrix of the indexes of the peers
183    uint8_t inverted[dealers][dealers][crypto_core_ristretto255_SCALARBYTES];
184    invertedVDMmatrix(dealers, indexes, inverted);
185    // todo optimization
186    // note this can be precomputed and broadcast to all peers, and only
187    // the first row of this matix is actually needed by the peers.
188  
189    // execute step 2 of simple mult
190    // H(j) = sum(lambda[i] * h[i](j) for i in 1..2t+1)
191    memset(share,0,TOPRF_Share_BYTES);
192    share->index=peer;
193    uint8_t tmp[crypto_core_ristretto255_SCALARBYTES];
194    for(unsigned i=0;i<dealers;i++) {
195      crypto_core_ristretto255_scalar_mul(tmp, shares[i]+1, inverted[0][i]);
196      //dump(shares[i], TOPRF_Share_BYTES, "mulshare[i][j]");
197      crypto_core_ristretto255_scalar_add(share->value, share->value, tmp);
198    }
199    //dump(share->value, sizeof share->value, "share");
200  }
201  
/** Evaluates one side of the VSPS check equation:

      v = Π(A_i ^ Δ_i), i=0..t,  where  Δ_i = Σ_j(λ_ji * δ^j), j=0..t

    written additively below since libsodium exposes ristretto255 as an
    additive group.

    NOTE(review): the parameter is declared A[t] but the loop reads
    A[0]..A[t], i.e. t+1 rows.  VLA bounds on parameters are not enforced
    by C, so this works as long as callers pass t+1 rows — but the declared
    bound is misleading; confirm and correct it at the declaration.

    @param t threshold; t+1 points and t+1 coefficients rows are consumed
    @param A the commitment points A_i (at least t+1 rows, see note)
    @param lambda (t+1)x(t+1) inverted Vandermonde matrix
    @param delta_exp precomputed powers δ^0..δ^t of the random scalar δ
    @param v output point
    @return 0 on success, 1 if a scalar multiplication fails
 */
static int vsps_check(const uint8_t t,
                      const uint8_t A[t][crypto_core_ristretto255_BYTES],
                      const uint8_t lambda[t+1][t+1][crypto_core_ristretto255_SCALARBYTES],
                      const uint8_t delta_exp[t+1][crypto_core_ristretto255_SCALARBYTES],
                      uint8_t v[crypto_core_ristretto255_BYTES]) {
  // calculates Π(A_i ^ Δ_i), where i=1..t+1,  Δ_i = Σ(λ_ji * δ^j,  j= 0..t

  // v = 0 — 32 zero bytes are the canonical encoding of the ristretto255
  // identity element, so the first addition below behaves as expected
  memset(v, 0,crypto_core_ristretto255_BYTES);

  for(int i=0;i<=t;i++) {
    // Δ_i accumulator
    uint8_t delta_i[crypto_core_ristretto255_SCALARBYTES]={0};
    for(int j=0;j<=t;j++) {
      // calculate λ_ji * δ^j
      uint8_t tmp[crypto_core_ristretto255_SCALARBYTES];
#ifdef UNIT_TEST
      dump(lambda[j][i], crypto_core_ristretto255_SCALARBYTES, "vdm[%d,%d]", j, i);
      dump(delta_exp[j], crypto_core_ristretto255_SCALARBYTES, "d^%d", j);
#endif
      crypto_core_ristretto255_scalar_mul(tmp, lambda[j][i], delta_exp[j]);
      // Δ_i = sum_(j=0..t) (λ_ji * δ^j)
      crypto_core_ristretto255_scalar_add(delta_i, delta_i, tmp);
    }
#ifdef UNIT_TEST
    dump(delta_i,sizeof delta_i, "Δ%d", i);
#endif
    uint8_t tmp[crypto_core_ristretto255_BYTES];
    // A_i ^ Δ_i
    if(0!=crypto_scalarmult_ristretto255(tmp, delta_i, A[i])) return 1;
#ifdef UNIT_TEST
    dump(tmp, crypto_scalarmult_ristretto255_BYTES, "A%d^Δ%d", i, i);
#endif
    // Π, but we are in an additive group
    crypto_core_ristretto255_add(v, v, tmp);
  }

  return 0;
}
240  
/** VSPS check: verifies that both halves of a commitment sequence
    interpolate consistently, by comparing the left-hand side (built from
    A_0..A_t) with the right-hand side (built from A_{t+1}..A_{2t+1}) of
    equation (1), randomized with a fresh scalar δ.

    NOTE(review): the RHS call passes &A[t+1] and vsps_check reads t+1 rows
    from there, i.e. A[t+1]..A[2t+1] — 2t+2 rows in total — while the
    parameter is declared A[t*2].  The declared VLA bound is not enforced;
    confirm against callers that 2t+2 commitment points are always supplied
    and fix the declared bound.

    @param t threshold
    @param A commitment points (see note on the required length)
    @return 0 if the check passes, 1 otherwise
 */
int toprf_mpc_vsps_check(const uint8_t t, const uint8_t A[t*2][crypto_core_ristretto255_BYTES]) {
  uint8_t indexes[t+1]; // p8para3L2: A0..At & At+1..A2t+1
  // left-hand side of the equation (1)
  for(uint8_t i=0;i<=t;i++) indexes[i]=i; // left side of equation Π i:=1..t, which is a typo? should be 0..t
  uint8_t lambda[t+1][t+1][crypto_core_ristretto255_SCALARBYTES];
  invertedVDMmatrix(t+1,indexes,lambda);
#ifdef UNIT_TEST
  if(liboprf_log_file!=NULL && liboprf_debug) fprintf(liboprf_log_file,"vdm1\n");
  for(int i=0;i<t+1;i++) {
    for(int j=0;j<t+1;j++) {
      if(liboprf_log_file!=NULL && liboprf_debug) fprintf(liboprf_log_file,"\t");
      dump(lambda[i][j], crypto_core_ristretto255_SCALARBYTES, "vdm[%d,%d]", i, j);
    }
  }
#endif

  // chose random δ, p8para3L4
  uint8_t delta[crypto_core_ristretto255_SCALARBYTES] = {0};
#ifdef UNIT_TEST
  // deterministic δ so unit-test transcripts are reproducible
  debian_rng_scalar(delta);
  dump(delta,sizeof delta, "δ");
#else
  crypto_core_ristretto255_scalar_random(delta);
#endif

  // pre-calculate δ^j for j=0..t
  uint8_t delta_exp[t+1][crypto_core_ristretto255_SCALARBYTES];
  memset(delta_exp,0,sizeof delta_exp);
  delta_exp[0][0]=1;
  for(int exp=1;exp<=t;exp++) {
    crypto_core_ristretto255_scalar_mul(delta_exp[exp], delta_exp[exp-1], delta);
  }

  uint8_t lhs[crypto_core_ristretto255_BYTES] = {0};
  if(0!=vsps_check(t, A, lambda, delta_exp, lhs)) return 1;
#ifdef UNIT_TEST
  dump(lhs, sizeof lhs, "lhs");
#endif

  // right-hand side of the equation (1)
  // since the RHS has A_i, i:=t+1..2t+1 see p8para3L2
  for(uint8_t i=0;i<=t;i++) indexes[i]=(uint8_t) (t+1U+i);
  invertedVDMmatrix(t+1,indexes,lambda);
#ifdef UNIT_TEST
  if(liboprf_log_file!=NULL && liboprf_debug) fprintf(liboprf_log_file,"vdm2\n");
  for(int i=0;i<t+1;i++) {
    for(int j=0;j<t+1;j++) {
      if(liboprf_log_file!=NULL && liboprf_debug) fprintf(liboprf_log_file,"\t");
      dump(lambda[i][j], crypto_core_ristretto255_SCALARBYTES, "vdm[%d,%d]", i, j);
    }
  }
#endif

  uint8_t rhs[crypto_core_ristretto255_BYTES] = {0};
  if(0!=vsps_check(t, &A[t+1], lambda, delta_exp, rhs)) return 1;
#ifdef UNIT_TEST
  dump(rhs, sizeof rhs, "rhs");
#endif

  // lhs == rhs — non-secret comparison, memcmp is fine here
  if(memcmp(lhs,rhs,sizeof lhs)!=0) return 1;
  return 0;
}
304  
305  // todo remove dealers param, can be calculatted from t param
306  int toprf_mpc_ftmult_step1(const uint8_t dealers, const uint8_t n, const uint8_t t, const uint8_t self,
307                             const TOPRF_Share alpha[2], const TOPRF_Share beta[2],
308                             const uint8_t lambdas[dealers][crypto_core_ristretto255_SCALARBYTES],
309                             TOPRF_Share ci_shares[n][2],
310                             uint8_t ci_commitments[n+1][crypto_core_ristretto255_BYTES],
311                             uint8_t ci_tau[crypto_core_ristretto255_SCALARBYTES]) {
312    // step 1. Each player P_i shares λ_iα_iβ_i, using VSS
313    if(lambdas==NULL) {
314       uint8_t indexes[dealers];
315       for(uint8_t i=0;i<dealers;i++) indexes[i]=i+1;
316  
317       // λ_i is row 1 of inv VDM matrix
318       uint8_t vdm[dealers][dealers][crypto_core_ristretto255_SCALARBYTES];
319       invertedVDMmatrix(dealers, indexes, vdm);
320       lambdas = vdm[0];
321    }
322  
323    //dump((uint8_t*) alpha, sizeof(TOPRF_Share)*2, "alpha[%d]", self);
324    //dump((uint8_t*) beta, sizeof(TOPRF_Share)*2, "beta[%d]", self);
325  
326    // c_ij = 𝑓_αβ,i(j), where 𝑓_αβ,i is a random polynomials of degree t, such that 𝑓_αβ,i(0) = λ_iα_iβ_i
327    // τ_ij = u_i(j), where u_i(j) is a random polynomials of degree t
328  
329    uint8_t lambda_ai_bi[crypto_scalarmult_ristretto255_SCALARBYTES];
330    crypto_core_ristretto255_scalar_mul(lambda_ai_bi, alpha[0].value, beta[0].value);
331    crypto_core_ristretto255_scalar_mul(lambda_ai_bi, lambda_ai_bi, lambdas[self]);
332    if(0!=dkg_vss_share(n,t,lambda_ai_bi, &ci_commitments[1], ci_shares, ci_tau)) return 1;
333    // c_i0 for the sake of the ZK proof is g^λab * h^t
334    if(0!=dkg_vss_commit(lambda_ai_bi, ci_tau, ci_commitments[0])) return 1;
335  
336    //fprintf(stderr, "ftmult s1: %d\n", self);
337    //for(unsigned i=0;i<n+1;i++) dump(ci_commitments[i], crypto_core_ristretto255_BYTES, "c_%d%d", self, i);
338    //for(unsigned i=0;i<n;i++) dump((uint8_t*) ci_shares[i], sizeof(TOPRF_Share)*2, "s_%d%d", self, i);
339  
340    // send ci_shares[j] to P_j
341    // broadcast ci_commitments
342    return 0;
343  }
344  
345  int toprf_mpc_ftmult_zk_commitments(const uint8_t B_i[crypto_core_ristretto255_BYTES],
346                                      uint8_t d[crypto_scalarmult_ristretto255_SCALARBYTES],
347                                      uint8_t s[crypto_scalarmult_ristretto255_SCALARBYTES],
348                                      uint8_t x[crypto_scalarmult_ristretto255_SCALARBYTES],
349                                      uint8_t s_1[crypto_scalarmult_ristretto255_SCALARBYTES],
350                                      uint8_t s_2[crypto_scalarmult_ristretto255_SCALARBYTES],
351                                      uint8_t zk_commitments[3][crypto_scalarmult_ristretto255_BYTES]) {
352    // step 2.2 P_i chooses d, s, x, s_1, s_2 ∈ Z_q. Sends to the verifier the messages:
353    //  M   = g^d * h^s,
354    //  M_1 = g^x * h^s_1,
355    //  M_2 = B^x * h^s_2
356    crypto_core_ristretto255_scalar_random(d);
357    crypto_core_ristretto255_scalar_random(s);
358    crypto_core_ristretto255_scalar_random(x);
359    crypto_core_ristretto255_scalar_random(s_1);
360    crypto_core_ristretto255_scalar_random(s_2);
361  
362    //dump(d, crypto_scalarmult_ristretto255_SCALARBYTES, "    d");
363    //dump(s, crypto_scalarmult_ristretto255_SCALARBYTES, "    s");
364    //dump(x, crypto_scalarmult_ristretto255_SCALARBYTES, "    x");
365    //dump(s_1, crypto_scalarmult_ristretto255_SCALARBYTES, "    s_1");
366    //dump(s_2, crypto_scalarmult_ristretto255_SCALARBYTES, "    s_2");
367    //  M   = g^d * h^s,
368    if(0!=dkg_vss_commit(d,s, zk_commitments[0])) return 1;
369    //dump(zk_commitments[0], crypto_scalarmult_ristretto255_BYTES, "    M");
370    //  M_1 = g^x * h^s_1,
371    if(0!=dkg_vss_commit(x,s_1, zk_commitments[1])) return 1;
372    //dump(zk_commitments[1], crypto_scalarmult_ristretto255_BYTES, "   M1");
373    //  M_2 = B^x * h^s_2
374    uint8_t tmp[crypto_scalarmult_ristretto255_BYTES];
375    if(crypto_scalarmult_ristretto255(tmp, x, B_i)) return 1;
376    if(crypto_scalarmult_ristretto255(zk_commitments[2], s_2, H)) return 1;
377    crypto_core_ristretto255_add(zk_commitments[2], zk_commitments[2], tmp);
378    //dump(zk_commitments[2], crypto_scalarmult_ristretto255_BYTES, "   M2");
379    return 0;
380  }