From 8b705f49fdf8d894c6f16b1f8e215731685d3c2d Mon Sep 17 00:00:00 2001 From: lovesh Date: Thu, 31 Oct 2024 02:15:47 +0530 Subject: [PATCH] Several changes - New protocol for proving pseudonym correctness in Syra - Use hash based commitment in some secret sharing schemes - Describe the extended NI-TZK protocol - Update notation at few places to make review easier - Upgrade wasmer to 5.0 Signed-off-by: lovesh --- legogroth16/Cargo.toml | 2 +- .../verifiable_encryption_tz_21.rs | 10 +- secret_sharing_and_dkg/README.md | 12 +- secret_sharing_and_dkg/src/abcp_dkg.rs | 241 +++--- .../src/abcp_ni_tzk_extended.rs | 52 ++ .../src/baghery_feldman_vss.rs | 219 ------ secret_sharing_and_dkg/src/baghery_vss.rs | 205 +++++ .../maliciously_secure.rs | 13 +- secret_sharing_and_dkg/src/lib.rs | 5 +- short_group_sig/src/weak_bb_sig.rs | 51 +- short_group_sig/src/weak_bb_sig_pok_cdh.rs | 8 +- syra/README.md | 37 +- syra/src/error.rs | 8 + syra/src/lib.rs | 41 +- syra/src/pseudonym.rs | 55 +- syra/src/pseudonym_alt.rs | 335 +++++++++ syra/src/setup.rs | 6 + syra/src/threshold_issuance.rs | 41 +- utils/src/solve_discrete_log.rs | 4 +- vb_accumulator/src/threshold/mod.rs | 699 +++++------------- 20 files changed, 1047 insertions(+), 997 deletions(-) create mode 100644 secret_sharing_and_dkg/src/abcp_ni_tzk_extended.rs delete mode 100644 secret_sharing_and_dkg/src/baghery_feldman_vss.rs create mode 100644 secret_sharing_and_dkg/src/baghery_vss.rs create mode 100644 syra/src/pseudonym_alt.rs diff --git a/legogroth16/Cargo.toml b/legogroth16/Cargo.toml index 5a4ba5d5..2aa854b3 100644 --- a/legogroth16/Cargo.toml +++ b/legogroth16/Cargo.toml @@ -23,7 +23,7 @@ ark-r1cs-std = { workspace = true, optional = true } tracing = { version = "0.1", default-features = false, features = [ "attributes" ], optional = true } derivative = { version = "2.0", features = ["use_core"], optional = true } rayon = { workspace = true, optional = true } -wasmer = { version = "4.3.6", optional = true, default-features = false } +wasmer = { version = "5.0.0", optional = true, default-features = false } fnv = { version = "1.0.3", default-features = false, optional = true } num-bigint = { version = "0.4", default-features = false, optional = true } log = "0.4" diff --git a/proof_system/src/sub_protocols/verifiable_encryption_tz_21.rs b/proof_system/src/sub_protocols/verifiable_encryption_tz_21.rs index 0960a0be..1dca8032 100644 --- a/proof_system/src/sub_protocols/verifiable_encryption_tz_21.rs +++ b/proof_system/src/sub_protocols/verifiable_encryption_tz_21.rs @@ -28,7 +28,6 @@ use zeroize::{Zeroize, ZeroizeOnDrop}; pub mod dkgith_decls { use super::BatchedHashedElgamalCiphertext; - use ark_ec::AffineRepr; use verifiable_encryption::tz_21::dkgith::{CompressedCiphertext, DkgithProof}; // Very large values for repetitions cause stack overflow @@ -47,7 +46,7 @@ pub mod dkgith_decls { pub const SEED_SIZE: usize = 16; pub const SALT_SIZE: usize = 32; - pub type Proof = DkgithProof< + pub type Proof = DkgithProof< G, BatchedHashedElgamalCiphertext, NUM_PARTIES, @@ -57,12 +56,11 @@ pub mod dkgith_decls { SEED_SIZE, SALT_SIZE, >; - pub type Ciphertext = + pub type Ciphertext = CompressedCiphertext, SUBSET_SIZE>; } pub mod rdkgith_decls { - use ark_ec::AffineRepr; use dock_crypto_utils::elgamal::BatchedHashedElgamalCiphertext; use verifiable_encryption::tz_21::rdkgith::{CompressedCiphertext, RdkgithProof}; @@ -77,14 +75,14 @@ pub mod rdkgith_decls { pub const NUM_PARTIES_MINUS_THRESHOLD: usize = 15; pub const SUBSET_SIZE: usize = 10; - pub type Proof 
= RdkgithProof< + pub type Proof = RdkgithProof< G, BatchedHashedElgamalCiphertext, NUM_PARTIES, THRESHOLD, NUM_PARTIES_MINUS_THRESHOLD, >; - pub type Ciphertext = + pub type Ciphertext = CompressedCiphertext, SUBSET_SIZE>; } diff --git a/secret_sharing_and_dkg/README.md b/secret_sharing_and_dkg/README.md index 5a7968d7..36d15079 100644 --- a/secret_sharing_and_dkg/README.md +++ b/secret_sharing_and_dkg/README.md @@ -1,7 +1,10 @@ + + # Secret sharing and distributed key generation -Implements Secret Sharing (SS), Verifiable Secret Sharing (VSS), Distributed Verifiable Secret Sharing (DVSS), Distributed -Key Generation (DKG) and Publicly Verifiable Secret Sharing (PVSS) algorithms. DVSS and DKG do not require a trusted dealer. Also implements a distributed discrete log check. +Implements Secret Sharing (SS), Verifiable Secret Sharing (VSS), Distributed Verifiable Secret Sharing (DVSS), Distributed +Key Generation (DKG) and Publicly Verifiable Secret Sharing (PVSS) algorithms. DVSS and DKG do not require a trusted dealer. +Also implements a distributed discrete log check. 1. [Shamir secret sharing (Requires a trusted dealer)](./src/shamir_ss.rs) @@ -12,4 +15,7 @@ Key Generation (DKG) and Publicly Verifiable Secret Sharing (PVSS) algorithms. D 1. [Gennaro DKG from the paper Secure Distributed Key Generation for Discrete-Log Based Cryptosystems](./src/gennaro_dkg.rs) 1. [Distributed Key Generation from FROST](./src/frost_dkg.rs) 1. [Distributed discrete log (DLOG) check](./src/distributed_dlog_check) -1. [Publicly Verifiable Secret Sharing](./src/baghery_pvss) \ No newline at end of file +1. [Publicly Verifiable Secret Sharing](./src/baghery_pvss) +1. [Verifiable Secret Sharing using hash-commitments](./src/baghery_vss.rs) + + diff --git a/secret_sharing_and_dkg/src/abcp_dkg.rs b/secret_sharing_and_dkg/src/abcp_dkg.rs index 57e646ff..10a037fa 100644 --- a/secret_sharing_and_dkg/src/abcp_dkg.rs +++ b/secret_sharing_and_dkg/src/abcp_dkg.rs @@ -13,22 +13,23 @@ use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::{cfg_into_iter, collections::BTreeMap, rand::RngCore, vec, vec::Vec, UniformRand}; use digest::Digest; -use dock_crypto_utils::{ - commitment::PedersenCommitmentKey, expect_equality, serde_utils::ArkObjectBytes, -}; +use dock_crypto_utils::{expect_equality, serde_utils::ArkObjectBytes}; use schnorr_pok::compute_random_oracle_challenge; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; +use zeroize::{Zeroize, ZeroizeOnDrop}; #[cfg(feature = "parallel")] use rayon::prelude::*; -use serde::{Deserialize, Serialize}; -use serde_with::serde_as; -use zeroize::{Zeroize, ZeroizeOnDrop}; +pub const DEFAULT_SALT_SIZE: usize = 128; +pub const DEFAULT_DIGEST_SIZE: usize = 64; + +pub const DOMAIN_SEPARATOR: &[u8] = b"abcp_dkg"; /// Share of the secret generated by a party #[serde_as] #[derive( - Default, Clone, Debug, PartialEq, @@ -40,71 +41,92 @@ use zeroize::{Zeroize, ZeroizeOnDrop}; Serialize, Deserialize, )] -pub struct VerifiableShare { +pub struct VerifiableShare { + /// Share id. Corresponds to the participant id. 
#[zeroize(skip)] pub id: ShareId, + /// At least `threshold` number of shares are needed to reconstruct the secret #[zeroize(skip)] pub threshold: ShareId, #[serde_as(as = "ArkObjectBytes")] pub share: F, - pub blinding: F, - pub blinding_prime: F, + #[serde_as(as = "[_; SALT_SIZE]")] + pub blinding: [u8; SALT_SIZE], + #[serde_as(as = "[_; SALT_SIZE]")] + pub blinding_prime: [u8; SALT_SIZE], } /// State of a party in Round 1. /// CMG is the group where commitments reside and PKG is the group of the public key #[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Round1> { +pub struct Round1< + PKG: AffineRepr, + const SALT_SIZE: usize = DEFAULT_SALT_SIZE, + const DIGEST_SIZE: usize = DEFAULT_DIGEST_SIZE, +> { pub id: ParticipantId, pub threshold: ShareId, pub secret: PKG::ScalarField, pub h: PKG, - pub shares: Vec>, - pub y_0: PKG::ScalarField, - pub y_0_prime: PKG::ScalarField, + pub shares: Vec>, + pub y_0: [u8; SALT_SIZE], + pub y_0_prime: [u8; SALT_SIZE], /// Stores broadcast messages received from other parties in this round - pub received_msgs: BTreeMap>, + pub received_msgs: BTreeMap>, } /// Message broadcasted by a party in Round 1 +/// CMG is the group where commitments reside and PKG is the group of the public key #[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Round1Msg> { +pub struct Round1Msg { pub sender_id: ParticipantId, - pub C: Vec, - pub C_prime: Vec, - pub C_0: PKG, - pub C_0_prime: PKG, - pub resp: DensePolynomial, + /// Commitments to the shares of the blinding + pub C: Vec<[u8; DIGEST_SIZE]>, + pub C_0: [u8; DIGEST_SIZE], + /// Commitments to the shares which will be opened in the second round + pub C_prime: Vec<[u8; DIGEST_SIZE]>, + pub C_0_prime: [u8; DIGEST_SIZE], + pub resp: DensePolynomial, } /// State of a party in Round 1. 
+/// CMG is the group where commitments reside and PKG is the group of the public key #[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Round2> { - pub round1_state: Round1, +pub struct Round2< + PKG: AffineRepr, + const SALT_SIZE: usize = DEFAULT_SALT_SIZE, + const DIGEST_SIZE: usize = DEFAULT_DIGEST_SIZE, +> { + pub round1_state: Round1, /// Stores broadcast messages received from other parties in this round - pub received_msgs: BTreeMap>, + pub received_msgs: BTreeMap>, /// Stores shares received from other parties in this round - pub received_shares: BTreeMap>, + pub received_shares: BTreeMap>, } /// Message broadcasted by a party in Round 2 #[derive(Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Round2Msg { +pub struct Round2Msg { pub sender_id: ParticipantId, pub h: PKG, - pub y_0: PKG::ScalarField, - pub y_0_prime: PKG::ScalarField, + // Question: Is `y_0` needed since `g, h`, etc will already be hashed into the challenge + pub y_0: [u8; SALT_SIZE], + pub y_0_prime: [u8; SALT_SIZE], } -impl> Round1 { - pub fn start<'a, R: RngCore, D: Digest>( +impl + Round1 +{ + /// At least `threshold` number of shares are needed to reconstruct the secret + /// `total` is the total number of participants + pub fn start<'a, R: RngCore, D: Digest, CMG: AffineRepr>( rng: &mut R, participant_id: ParticipantId, threshold: ShareId, total: ShareId, - comm_key: &PedersenCommitmentKey, + comm_key: CMG, pk_gen: impl Into<&'a PKG> + Clone, - ) -> Result<(Self, Round1Msg), SSError> { + ) -> Result<(Self, Round1Msg), SSError> { if participant_id == 0 || participant_id > total { return Err(SSError::InvalidParticipantId(participant_id)); } @@ -120,31 +142,26 @@ impl> Round1>(); let b_0 = b.coeffs[0]; let y = (0..total) - .map(|_| PKG::ScalarField::rand(rng)) + .map(|_| Self::get_random_salt(rng)) .collect::>(); let y_prime = (0..total) - .map(|_| PKG::ScalarField::rand(rng)) + .map(|_| Self::get_random_salt(rng)) .collect::>(); - let y_0 = PKG::ScalarField::rand(rng); - let y_0_prime = PKG::ScalarField::rand(rng); + let y_0 = Self::get_random_salt(rng); + let y_0_prime = Self::get_random_salt(rng); let pk_gen = pk_gen.into().into_group(); let h = pk_gen * secret; - let C_0 = (pk_gen * (b_0 * y_0)).into_affine(); - let C_0_prime = ((pk_gen + h) * y_0_prime).into_affine(); - let C = CMG::Group::normalize_batch( - &cfg_into_iter!(0..total as usize) - .map(|i| comm_key.commit_as_projective(&b_evals[i], &y[i])) - .collect::>(), - ); - let C_prime = CMG::Group::normalize_batch( - &cfg_into_iter!(0..total as usize) - .map(|i| comm_key.commit_as_projective(&shares.0[i].share, &y_prime[i])) - .collect::>(), - ); + let C_0 = hash_commitment::<_, D, DIGEST_SIZE>(pk_gen * b_0, &y_0); + let C_0_prime = hash_commitment::<_, D, DIGEST_SIZE>(pk_gen + h, &y_0_prime); + let C = cfg_into_iter!(0..total as usize) + .map(|i| hash_commitment::<_, D, DIGEST_SIZE>(comm_key * b_evals[i], &y[i])) + .collect::>(); + let C_prime = cfg_into_iter!(0..total as usize) + .map(|i| hash_commitment::<_, D, DIGEST_SIZE>(shares.0[i].share, &y_prime[i])) + .collect::>(); let mut chal_bytes = vec![]; - comm_key.g.serialize_compressed(&mut chal_bytes)?; - comm_key.h.serialize_compressed(&mut chal_bytes)?; + comm_key.serialize_compressed(&mut chal_bytes)?; C_0.serialize_compressed(&mut chal_bytes)?; C_0_prime.serialize_compressed(&mut chal_bytes)?; for i in 0..C.len() { @@ -185,7 +202,10 @@ impl> Round1) -> Result<(), SSError> { + pub fn add_received_message( + &mut self, + 
msg: Round1Msg, + ) -> Result<(), SSError> { if msg.sender_id == self.id { return Err(SSError::SenderIdSameAsReceiver(msg.sender_id, self.id)); } @@ -212,7 +232,15 @@ impl> Round1 Result<(Round2, Round2Msg), SSError> { + pub fn finish( + self, + ) -> Result< + ( + Round2, + Round2Msg, + ), + SSError, + > { // +1 because `self.received_msgs` does not contain message from itself if self.threshold > (self.received_msgs.len() as ParticipantId + 1) { return Err(SSError::BelowThreshold( @@ -234,10 +262,18 @@ impl> Round1(rng: &mut R) -> [u8; SALT_SIZE] { + let mut s = [0; SALT_SIZE]; + rng.fill_bytes(&mut s); + s + } } -impl> Round2 { - pub fn add_received_message(&mut self, msg: Round2Msg) -> Result<(), SSError> { +impl + Round2 +{ + pub fn add_received_message(&mut self, msg: Round2Msg) -> Result<(), SSError> { if self.round1_state.id == msg.sender_id { return Err(SSError::SenderIdSameAsReceiver( self.round1_state.id, @@ -254,11 +290,11 @@ impl> Round2( + pub fn add_received_share<'a, D: Digest, CMG: AffineRepr>( &mut self, sender_id: ParticipantId, - share: VerifiableShare, - comm_key: &PedersenCommitmentKey, + share: VerifiableShare, + comm_key: CMG, pk_gen: impl Into<&'a PKG> + Clone, ) -> Result<(), SSError> { if self.round1_state.id == sender_id { @@ -276,7 +312,7 @@ impl> Round2(sender_id, &share, comm_key, pk_gen)?; + self.verify_share::(sender_id, &share, comm_key, pk_gen)?; self.received_shares.insert(sender_id, share); Ok(()) } @@ -304,11 +340,11 @@ impl> Round2( + pub fn verify_share<'a, D: Digest, CMG: AffineRepr>( &self, sender_id: ParticipantId, - share: &VerifiableShare, - comm_key: &PedersenCommitmentKey, + share: &VerifiableShare, + comm_key: CMG, pk_gen: impl Into<&'a PKG> + Clone, ) -> Result<(), SSError> { let round1_msg = self @@ -321,18 +357,19 @@ impl> Round2(share.share, &share.blinding_prime) + != round1_msg.C_prime[self_idx] { return Err(SSError::InvalidShare); } let pk_gen = *pk_gen.into(); - if (pk_gen + round2_msg.h) * round2_msg.y_0_prime != round1_msg.C_0_prime.into_group() { + if hash_commitment::<_, D, DIGEST_SIZE>(pk_gen + round2_msg.h, &round2_msg.y_0_prime) + != round1_msg.C_0_prime + { return Err(SSError::InvalidShare); } let mut chal_bytes = vec![]; - comm_key.g.serialize_compressed(&mut chal_bytes)?; - comm_key.h.serialize_compressed(&mut chal_bytes)?; + comm_key.serialize_compressed(&mut chal_bytes)?; round1_msg.C_0.serialize_compressed(&mut chal_bytes)?; round1_msg.C_0_prime.serialize_compressed(&mut chal_bytes)?; for i in 0..round1_msg.C.len() { @@ -341,16 +378,18 @@ impl> Round2(&chal_bytes); let h_prime = pk_gen * round1_msg.resp.coeffs[0] + round2_msg.h * d; - if round1_msg.C_0.into_group() != h_prime * round2_msg.y_0 { + if round1_msg.C_0 != hash_commitment::<_, D, DIGEST_SIZE>(h_prime, &round2_msg.y_0) { return Err(SSError::InvalidShare); } - if comm_key.commit_as_projective( - &(round1_msg - .resp - .evaluate(&CMG::ScalarField::from(self.round1_state.id)) - + share.share * d), - &share.blinding, - ) != round1_msg.C[self_idx].into_group() + if round1_msg.C[self_idx] + != hash_commitment::<_, D, DIGEST_SIZE>( + comm_key + * (round1_msg + .resp + .evaluate(&PKG::ScalarField::from(self.round1_state.id)) + + share.share * d), + &share.blinding, + ) { return Err(SSError::InvalidShare); } @@ -371,11 +410,26 @@ impl> Round2 Option<&VerifiableShare> { + ) -> Option<&VerifiableShare> { self.received_shares.get(&id) } } +/// Commit to the message and salt using a hash function. 
Hash function is used because these commitments +/// are verified using a commit and reveal approach. +pub fn hash_commitment( + msg: T, + salt: &[u8], +) -> [u8; DIGEST_SIZE] { + let serz_size = T::compressed_size(&msg); + let mut bytes = Vec::with_capacity(serz_size + salt.len() + DOMAIN_SEPARATOR.len()); + msg.serialize_compressed(&mut bytes).unwrap(); + bytes.extend_from_slice(DOMAIN_SEPARATOR); + bytes.extend_from_slice(salt); + let d = D::digest(&bytes); + d.as_slice().try_into().expect("Wrong length") +} + #[cfg(test)] pub mod tests { use super::*; @@ -389,13 +443,13 @@ pub mod tests { #[test] fn distributed_key_generation() { let mut rng = StdRng::seed_from_u64(0u64); - let ped_comm_key = PedersenCommitmentKey::::new::(b"test"); + let comm_key = G1Affine::rand(&mut rng); let pk_gen_g1 = G1Affine::rand(&mut rng); let pk_gen_g2 = G2Affine::rand(&mut rng); - fn check>( + fn check>( rng: &mut StdRng, - ped_comm_key: &PedersenCommitmentKey, + comm_key: CMG, pk_gen: &PKG, ) { for (threshold, total) in vec![ @@ -430,15 +484,20 @@ pub mod tests { // Each participant starts Round1 for i in 1..=total { let start = Instant::now(); - let (round1, msgs) = Round1::start::<_, Blake2b512>( - rng, - i as ParticipantId, - threshold as ShareId, - total as ShareId, - ped_comm_key, - pk_gen, - ) - .unwrap(); + let (round1, msgs) = + Round1::<_, DEFAULT_SALT_SIZE, DEFAULT_DIGEST_SIZE>::start::< + _, + Blake2b512, + CMG, + >( + rng, + i as ParticipantId, + threshold as ShareId, + total as ShareId, + comm_key, + pk_gen, + ) + .unwrap(); round1_time += start.elapsed(); all_secrets.push(round1.secret.clone()); @@ -486,10 +545,10 @@ pub mod tests { if i != j { let share = all_round2s[j].round1_state.shares[i].clone(); all_round2s[i] - .add_received_share::( + .add_received_share::( (j + 1) as ParticipantId, share, - ped_comm_key, + comm_key, pk_gen, ) .unwrap(); @@ -532,7 +591,7 @@ pub mod tests { } } - check(&mut rng, &ped_comm_key, &pk_gen_g1); - check(&mut rng, &ped_comm_key, &pk_gen_g2); + check(&mut rng, comm_key, &pk_gen_g1); + check(&mut rng, comm_key, &pk_gen_g2); } } diff --git a/secret_sharing_and_dkg/src/abcp_ni_tzk_extended.rs b/secret_sharing_and_dkg/src/abcp_ni_tzk_extended.rs new file mode 100644 index 00000000..642ee2d2 --- /dev/null +++ b/secret_sharing_and_dkg/src/abcp_ni_tzk_extended.rs @@ -0,0 +1,52 @@ +//! Generalization of the protocol for NI-TZK for discrete logarithms described in Fig 3 of the paper [VSS from Distributed ZK Proofs and Applications](https://eprint.iacr.org/2023/992.pdf) +//! Fig 3. describes a protocol for the prover sharing its witness `x` for the relation `h = g^x` among `n` parties such +//! that a threshold number, `t`, of them can reconstruct `x`. +//! +//! Following describes a protocol where the prover shares its witnesses `(x, y, z, ...)` for the relation `h = g_1^x.g_2^y.g_3^z...` where +//! party `i` has shares `(x_i, y_i, z_i, ..)` and any threshold number of them can reconstruct `(x, y, z, ...)`. It uses the +//! approach used when using the Schnorr proof of knowledge protocol to prove knowledge of the opening of a Pedersen commitment. +//! Note: I use multiplicative notation below. +//! +//! I describe the protocol for the relation `h = g_1^x.g_2^y` with 2 witnesses `x, y` below, but it can be extended to more witnesses easily. +//! +//! `HC` is a hash function used to commit to shares and the commitment is opened by revealing the shares. +//! `HC(x_i, .., salt) -> {0, 1}^*` where `x_i ...` can be field elements or group elements and `salt ∈ {0, 1}^*` +//! 
+//! 1. Prover samples polynomials of degree `t` (such `t+1` parties can reconstruct) `f_x(X), f_y(X)` such that `f_x(0) = x, f_y(0) = y`. +//! 2. Prover samples random blinding polynomials of degree `t` as `b_x(X), b_y(X)`. +//! 3. Prover commits to `b_x(0), b_y(0)` as `C_0 = HC(g_1^b_x(0).g_2^b_y(0), k_0)` where `k_0` is a random salt. +//! 4. For shareholder `i`, the prover +//! 4.1 picks random salts `k_i <- {0, 1}^*, k'_i <- {0, 1}^*` +//! 4.2 commits to `b_x(i), b_y(i)` as `C_i = HC(g_1^b_x(i).g_2^b_y(i), k_i)` +//! 4.3 commits to `x_i=f_x(i), y_i=f_y(i)` as `C'_i = HC(x_i, y_i, k'_i)`. +//! 5. Prover hashes all `C_i`, `C'_i` for `i` = 0 to `n` and instance variables like `h, g_1, g_2`, etc to create challenge `d`. +//! 6. Prover creates response polynomials `r_x(X) = b_x(X) - d.f_x(X), r_y(X) = b_y(X) - d.f_y(X)`. +//! 7. Prover broadcasts all `C_i`, `C'_i` for `i` = 0 to `n`, `k_0` and polynomials `r_x, r_y` and sends `(x_i, y_i, k_i, k'_i)` to shareholder `i` on a private channel. +//! 8. Each shareholder constructs challenge `d` in the same way as prover in step 5. +//! 9. Shareholder `i` verifies +//! 9.1 `C'_i = HC(x_i, y_i, k'_i)` +//! 9.2 `C_i == HC(g_1^{r_x(i) + d.x_i}.g_2^{r_y(i) + d.y_i}, k_i)` (his own share is correct) +//! 9.3 `C_0 == HC(g_1^r_x(0).g_2^r_y(0).h^d, k_0)` (share is part of the original witness) +//! +//! Note that I omit commitment `C'_0` created in the paper as all the instance variables are anyway hashed into the challenge `d` +//! +//! Following is modification of the above protocol where prover only wants to share the witness `x` of the Pedersen commitment `h = g_1^x.g_2^y`. +//! +//! 1. Prover samples polynomial of degree `t` (such `t+1` parties can reconstruct) `f_x(X)` such that `f_x(0) = x`. +//! 2. Prover samples random blinding polynomial of degree `t` as `b_x(X)` and a random `j <- Z_p` +//! 3. Prover commits to `b_x(0)` as `C_0 = HC(g_1^b_x(0).g_2^j, k_0)` where `k_0` is a random salt. +//! 4. For shareholder `i`, the prover +//! 4.1 picks random salts `k_i <- {0, 1}^*, k'_i <- {0, 1}^*` +//! 4.2 commits to `b_x(i)` as `C_i = HC(g_1^b_x(i), k_i)` +//! 4.3 commits to `x_i=f_x(i)` as `C'_i = HC(x_i, k'_i)`. +//! 5. Prover hashes all `C_i`, `C'_i` for `i` = 0 to `n` and instance variables like `h, g_1, g_2`, etc to create challenge `d`. +//! 6. Prover creates response polynomial `r_x(X) = b_x(X) - d.f_x(X)` and `s = j - d.y`. +//! 7. Prover broadcasts all `C_i`, `C'_i` for `i` = 0 to `n`, `k_0`, `s` and polynomial `r_x` and sends `(x_i, k_i, k'_i)` to shareholder `i` on a private channel. +//! 8. Each shareholder constructs challenge `d` in the same way as prover in step 5. +//! 9. Shareholder `i` verifies +//! 9.1 `C'_i = HC(x_i, k'_i)` +//! 9.2 `C_i == HC(g_1^{r_x(i) + d.x_i}, k_i)` (his own share is correct) +//! 9.3 `C_0 == HC(g_1^r_x(0).g_2^s.h^d, k_0)` (share is part of the original witness) +//! + +// TODO: Implement these diff --git a/secret_sharing_and_dkg/src/baghery_feldman_vss.rs b/secret_sharing_and_dkg/src/baghery_feldman_vss.rs deleted file mode 100644 index 87bebb7f..00000000 --- a/secret_sharing_and_dkg/src/baghery_feldman_vss.rs +++ /dev/null @@ -1,219 +0,0 @@ -//! Feldman's Verifiable Secret Sharing Scheme, with faster verification but slower sharing, by K. Baghery. -//! 
As described in Fig 3 of the paper [A Unified Framework for Verifiable Secret Sharing](https://eprint.iacr.org/2023/1669) - -use crate::{ - common::{Share, ShareId, Shares}, - error::SSError, - shamir_ss, -}; -use ark_ec::{AffineRepr, CurveGroup}; -use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ark_std::{cfg_into_iter, rand::RngCore, vec, vec::Vec, UniformRand}; -use digest::Digest; -use dock_crypto_utils::{commitment::PedersenCommitmentKey, serde_utils::ArkObjectBytes}; -use schnorr_pok::compute_random_oracle_challenge; -use serde::{Deserialize, Serialize}; -use serde_with::serde_as; - -#[cfg(feature = "parallel")] -use rayon::prelude::*; - -/// Proof that the dealer shared the secret correctly. -#[serde_as] -#[derive( - Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize, -)] -#[serde(bound = "")] -pub struct Proof { - #[serde_as(as = "Vec")] - pub commitments: Vec, - #[serde_as(as = "ArkObjectBytes")] - pub resp: DensePolynomial, - #[serde_as(as = "ArkObjectBytes")] - pub challenge: G::ScalarField, -} - -/// Generate a random secret with its shares according to Shamir's secret sharing. -/// At least `threshold` number of shares are needed to reconstruct the secret. -/// Returns the secret, shares, the polynomial and proof to verify the correct sharing -pub fn deal_random_secret( - rng: &mut R, - threshold: ShareId, - total: ShareId, - comm_key: &PedersenCommitmentKey, -) -> Result< - ( - G::ScalarField, - Shares, - DensePolynomial, - Proof, - ), - SSError, -> { - let secret = G::ScalarField::rand(rng); - let (shares, sharing_poly, proof) = - deal_secret::<_, _, D>(rng, secret, threshold, total, comm_key)?; - Ok((secret, shares, sharing_poly, proof)) -} - -/// Same as `deal_random_secret` above but accepts the secret to share -pub fn deal_secret( - rng: &mut R, - secret: G::ScalarField, - threshold: ShareId, - total: ShareId, - comm_key: &PedersenCommitmentKey, -) -> Result< - ( - Shares, - DensePolynomial, - Proof, - ), - SSError, -> { - let (shares, f) = shamir_ss::deal_secret(rng, secret, threshold, total)?; - let r = as DenseUVPolynomial>::rand( - threshold as usize - 1, - rng, - ); - debug_assert_eq!(f.degree(), r.degree()); - let r_evals = cfg_into_iter!(1..=total) - .map(|i| r.evaluate(&G::ScalarField::from(i))) - .collect::>(); - let commitments = G::Group::normalize_batch( - &cfg_into_iter!(0..total as usize) - .map(|i| comm_key.commit_as_projective(&shares.0[i].share, &r_evals[i])) - .collect::>(), - ); - let mut chal_bytes = vec![]; - comm_key.g.serialize_compressed(&mut chal_bytes)?; - comm_key.h.serialize_compressed(&mut chal_bytes)?; - for c in &commitments { - c.serialize_compressed(&mut chal_bytes)?; - } - let d = compute_random_oracle_challenge::(&chal_bytes); - let z = r + (&f * d); - Ok(( - shares, - f, - Proof { - commitments, - resp: z, - challenge: d, - }, - )) -} - -impl Proof { - pub fn verify( - &self, - share: &Share, - comm_key: &PedersenCommitmentKey, - ) -> Result<(), SSError> { - if self.resp.degree() != share.threshold as usize - 1 { - return Err(SSError::DoesNotSupportThreshold(share.threshold)); - } - let mut chal_bytes = vec![]; - comm_key.g.serialize_compressed(&mut chal_bytes)?; - comm_key.h.serialize_compressed(&mut chal_bytes)?; - for c in &self.commitments { - c.serialize_compressed(&mut chal_bytes)?; - } - let d = compute_random_oracle_challenge::(&chal_bytes); - let r = 
self.resp.evaluate(&G::ScalarField::from(share.id)) - d * share.share; - if self.commitments[share.id as usize - 1] != comm_key.commit(&share.share, &r) { - return Err(SSError::InvalidShare); - } - Ok(()) - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use ark_bls12_381::{G1Affine, G2Affine}; - use ark_ff::One; - use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress}; - use ark_std::rand::{rngs::StdRng, SeedableRng}; - use blake2::Blake2b512; - use std::time::Instant; - use test_utils::test_serialization; - - #[test] - fn baghery_verifiable_secret_sharing() { - let mut rng = StdRng::seed_from_u64(0u64); - let comm_key1 = PedersenCommitmentKey::::new::(b"test"); - let comm_key2 = PedersenCommitmentKey::::new::(b"test"); - - fn check(rng: &mut StdRng, comm_key: &PedersenCommitmentKey) { - let mut checked_serialization = false; - for (threshold, total) in vec![ - (2, 2), - (2, 3), - (2, 4), - (2, 5), - (3, 3), - (3, 4), - (3, 5), - (4, 5), - (4, 8), - (4, 9), - (4, 12), - (5, 5), - (5, 7), - (5, 10), - (5, 13), - (7, 10), - (7, 15), - ] { - println!("For {}-of-{} sharing", threshold, total); - let start = Instant::now(); - let (secret, shares, _, proof) = deal_random_secret::<_, G, Blake2b512>( - rng, - threshold as ShareId, - total as ShareId, - &comm_key, - ) - .unwrap(); - println!("Time to create shares and proof {:?}", start.elapsed()); - println!( - "Proof size is {} bytes", - proof.serialized_size(Compress::Yes) - ); - - let mut noted_time = false; - - for share in &shares.0 { - // Wrong share fails to verify - let mut wrong_share = share.clone(); - wrong_share.share += G::ScalarField::one(); - assert!(proof.verify::(&wrong_share, &comm_key).is_err()); - - // Correct share verifies - let start = Instant::now(); - proof.verify::(&share, &comm_key).unwrap(); - if !noted_time { - println!("Time to verify share is {:?}", start.elapsed()); - noted_time = true; - } - } - - // Its assumed that reconstructor verifies each share before calling `reconstruct_secret` - let s = shares.reconstruct_secret().unwrap(); - assert_eq!(s, secret); - - // Test serialization - if !checked_serialization { - test_serialization!(Shares, shares); - test_serialization!(Share, shares.0[0]); - test_serialization!(Proof, proof); - checked_serialization = true; - } - } - } - - check(&mut rng, &comm_key1); - check(&mut rng, &comm_key2); - } -} diff --git a/secret_sharing_and_dkg/src/baghery_vss.rs b/secret_sharing_and_dkg/src/baghery_vss.rs new file mode 100644 index 00000000..b5bbd7e1 --- /dev/null +++ b/secret_sharing_and_dkg/src/baghery_vss.rs @@ -0,0 +1,205 @@ +//! Verifiable Secret Sharing scheme inspired by Feldman's, but using hash function for commitment instead of elliptic curve operations. By K. Baghery. +//! +//! 
As described by `Π_LA`, in Fig 5 of the paper [A Unified Framework for Verifiable Secret Sharing](https://eprint.iacr.org/2023/1669) + +use crate::{ + common::{Share, ShareId, Shares}, + error::SSError, + shamir_ss, +}; +use ark_ff::PrimeField; +use ark_poly::{univariate::DensePolynomial, DenseUVPolynomial, Polynomial}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{cfg_into_iter, rand::RngCore, vec, vec::Vec}; +use digest::Digest; +use dock_crypto_utils::serde_utils::ArkObjectBytes; +use schnorr_pok::compute_random_oracle_challenge; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +#[cfg(feature = "parallel")] +use rayon::prelude::*; + +pub const DEFAULT_DIGEST_SIZE: usize = 64; +pub const DOMAIN_SEPARATOR: &[u8] = b"PI_LA"; + +/// Proof that the dealer shared the secret correctly. +#[serde_as] +#[derive( + Clone, Debug, PartialEq, Eq, CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize, +)] +#[serde(bound = "")] +pub struct Proof { + #[serde_as(as = "Vec<[_; DIGEST_SIZE]>")] + pub commitments: Vec<[u8; DIGEST_SIZE]>, + #[serde_as(as = "ArkObjectBytes")] + pub resp: DensePolynomial, + #[serde_as(as = "ArkObjectBytes")] + pub challenge: F, +} + +/// Generate a random secret with its shares according to Shamir's secret sharing. +/// At least `threshold` number of shares are needed to reconstruct the secret. +/// Returns the secret, shares, the polynomial and proof to verify the correct sharing +pub fn deal_random_secret( + rng: &mut R, + threshold: ShareId, + total: ShareId, +) -> Result<(F, Shares, DensePolynomial, Proof), SSError> { + let secret = F::rand(rng); + let (shares, sharing_poly, proof) = + deal_secret::<_, _, D, DIGEST_SIZE>(rng, secret, threshold, total)?; + Ok((secret, shares, sharing_poly, proof)) +} + +/// Same as `deal_random_secret` above but accepts the secret to share +pub fn deal_secret( + rng: &mut R, + secret: F, + threshold: ShareId, + total: ShareId, +) -> Result<(Shares, DensePolynomial, Proof), SSError> { + let (shares, f) = shamir_ss::deal_secret(rng, secret, threshold, total)?; + let r = as DenseUVPolynomial>::rand(threshold as usize - 1, rng); + debug_assert_eq!(f.degree(), r.degree()); + let commitments = cfg_into_iter!(0..total as usize) + .map(|i| { + hash_commitment::<_, D, DIGEST_SIZE>( + shares.0[i].share, + r.evaluate(&F::from(i as u64 + 1)), + ) + }) + .collect::>(); + let mut chal_bytes = vec![]; + for c in &commitments { + c.serialize_compressed(&mut chal_bytes)?; + } + let d = compute_random_oracle_challenge::(&chal_bytes); + let z = r + (&f * d); + Ok(( + shares, + f, + Proof { + commitments, + resp: z, + challenge: d, + }, + )) +} + +impl Proof { + pub fn verify(&self, share: &Share) -> Result<(), SSError> { + if self.resp.degree() != share.threshold as usize - 1 { + return Err(SSError::DoesNotSupportThreshold(share.threshold)); + } + let mut chal_bytes = vec![]; + for c in &self.commitments { + c.serialize_compressed(&mut chal_bytes)?; + } + let d = compute_random_oracle_challenge::(&chal_bytes); + let r = self.resp.evaluate(&F::from(share.id)) - d * share.share; + if self.commitments[share.id as usize - 1] + != hash_commitment::<_, D, DIGEST_SIZE>(share.share, r) + { + return Err(SSError::InvalidShare); + } + Ok(()) + } +} + +pub fn hash_commitment( + msg: T, + r: T, +) -> [u8; DIGEST_SIZE] { + let serz_size = T::compressed_size(&msg); + let mut bytes = Vec::with_capacity(serz_size * 2 + DOMAIN_SEPARATOR.len()); + msg.serialize_compressed(&mut bytes).unwrap(); + 
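+    // The digest below covers `msg || DOMAIN_SEPARATOR || r`, so the commitment is opened by revealing `(msg, r)`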
bytes.extend_from_slice(DOMAIN_SEPARATOR); + r.serialize_compressed(&mut bytes).unwrap(); + let d = D::digest(&bytes); + d.as_slice().try_into().expect("Wrong length") +} + +#[cfg(test)] +pub mod tests { + use super::*; + use ark_bls12_381::Fr; + use ark_ff::One; + use ark_serialize::{CanonicalDeserialize, CanonicalSerialize, Compress}; + use ark_std::rand::{rngs::StdRng, SeedableRng}; + use blake2::Blake2b512; + use std::time::Instant; + use test_utils::test_serialization; + + #[test] + fn baghery_verifiable_secret_sharing() { + let mut rng = StdRng::seed_from_u64(0u64); + let mut checked_serialization = false; + for (threshold, total) in vec![ + (2, 2), + (2, 3), + (2, 4), + (2, 5), + (3, 3), + (3, 4), + (3, 5), + (4, 5), + (4, 8), + (4, 9), + (4, 12), + (5, 5), + (5, 7), + (5, 10), + (5, 13), + (7, 10), + (7, 15), + (15, 32), + (63, 128), + (255, 512), + ] { + println!("For {}-of-{} sharing", threshold, total); + let start = Instant::now(); + let (secret, shares, _, proof) = + deal_random_secret::<_, Fr, Blake2b512, DEFAULT_DIGEST_SIZE>( + &mut rng, + threshold as ShareId, + total as ShareId, + ) + .unwrap(); + println!("Time to create shares and proof {:?}", start.elapsed()); + println!( + "Proof size is {} bytes", + proof.serialized_size(Compress::Yes) + ); + + let mut noted_time = false; + + for share in &shares.0 { + // Wrong share fails to verify + let mut wrong_share = share.clone(); + wrong_share.share += Fr::one(); + assert!(proof.verify::(&wrong_share).is_err()); + + // Correct share verifies + let start = Instant::now(); + proof.verify::(&share).unwrap(); + if !noted_time { + println!("Time to verify share is {:?}", start.elapsed()); + noted_time = true; + } + } + + // Its assumed that reconstructor verifies each share before calling `reconstruct_secret` + let s = shares.reconstruct_secret().unwrap(); + assert_eq!(s, secret); + + // Test serialization + if !checked_serialization { + test_serialization!(Shares, shares); + test_serialization!(Share, shares.0[0]); + test_serialization!(Proof, proof); + checked_serialization = true; + } + } + } +} diff --git a/secret_sharing_and_dkg/src/distributed_dlog_check/maliciously_secure.rs b/secret_sharing_and_dkg/src/distributed_dlog_check/maliciously_secure.rs index 6f7c93e4..5c638cfd 100644 --- a/secret_sharing_and_dkg/src/distributed_dlog_check/maliciously_secure.rs +++ b/secret_sharing_and_dkg/src/distributed_dlog_check/maliciously_secure.rs @@ -36,7 +36,7 @@ use rayon::prelude::*; macro_rules! impl_protocol { ( $(#[$protocol_doc:meta])* - $secret_share: ident, $secret_share_comm: ident, $computation_share: ident, $computation_share_proof: ident, $deal_secret: ident, $discrete_log_protocol: ident, $discrete_log_proof: ident, $secret_group: ty, $other_group: ty, $pairing: tt) => { + $secret_share: ident, $secret_share_comm: ident, $computation_share: ident, $computation_share_proof: ident, $deal_secret: ident, $discrete_log_protocol: ident, $discrete_log_proof: ident, $secret_group: path, $other_group: path, $pairing: tt) => { $(#[$protocol_doc])* #[serde_as] @@ -362,9 +362,6 @@ macro_rules! 
impl_protocol { }; } -type G1Af = E::G1Affine; -type G2Af = E::G2Affine; - impl_protocol!( /// Share of the secret when the elements to check the discrete log relation are in group G1 SecretShareG1, @@ -374,8 +371,8 @@ impl_protocol!( deal_secret_in_g1, PoKG1DiscreteLogInPairingProtocol, PoKG1DiscreteLogInPairing, - G1Af, - G2Af, + E::G1Affine, + E::G2Affine, pair_g2_g1 ); @@ -388,8 +385,8 @@ impl_protocol!( deal_secret_in_g2, PoKG2DiscreteLogInPairingProtocol, PoKG2DiscreteLogInPairing, - G2Af, - G1Af, + E::G2Affine, + E::G1Affine, pair_g1_g2 ); diff --git a/secret_sharing_and_dkg/src/lib.rs b/secret_sharing_and_dkg/src/lib.rs index 08e2d622..141f15e5 100644 --- a/secret_sharing_and_dkg/src/lib.rs +++ b/secret_sharing_and_dkg/src/lib.rs @@ -16,11 +16,12 @@ //! 1. [Distributed Key Generation from FROST](./src/frost_dkg.rs) //! 1. [Distributed discrete log (DLOG) check](./src/distributed_dlog_check) //! 1. [Publicly Verifiable Secret Sharing](./src/baghery_pvss) -//! +//! 1. [Verifiable Secret Sharing using hash-commitments](./src/baghery_vss.rs) pub mod abcp_dkg; -pub mod baghery_feldman_vss; +pub mod abcp_ni_tzk_extended; pub mod baghery_pvss; +pub mod baghery_vss; pub mod common; pub mod distributed_dlog_check; pub mod error; diff --git a/short_group_sig/src/weak_bb_sig.rs b/short_group_sig/src/weak_bb_sig.rs index 58f7a10e..316bd91e 100644 --- a/short_group_sig/src/weak_bb_sig.rs +++ b/short_group_sig/src/weak_bb_sig.rs @@ -4,7 +4,10 @@ use crate::{ common::{SignatureParams, SignatureParamsWithPairing}, error::ShortGroupSigError, }; -use ark_ec::{pairing::Pairing, AffineRepr}; +use ark_ec::{ + pairing::{Pairing, PairingOutput}, + AffineRepr, +}; use ark_ff::{Field, PrimeField, Zero}; use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; use ark_std::{rand::RngCore, vec::Vec}; @@ -79,6 +82,12 @@ impl PublicKeyG2 { } } +impl AsRef for PublicKeyG2 { + fn as_ref(&self) -> &E::G2Affine { + &self.0 + } +} + impl PublicKeyG1 { pub fn generate_using_secret_key>( secret_key: &SecretKey, @@ -107,27 +116,37 @@ impl SignatureG1 { /// Create a new signature pub fn new( message: &E::ScalarField, - sk: &SecretKey, - params: &SignatureParams, + sk: impl AsRef, + gen: impl AsRef, ) -> Self { - Self((params.g1 * ((sk.0 + message).inverse().unwrap())).into()) + Self((*gen.as_ref() * ((*sk.as_ref() + message).inverse().unwrap())).into()) } pub fn verify( &self, message: &E::ScalarField, - pk: &PublicKeyG2, + pk: impl AsRef, params: &SignatureParams, + ) -> Result<(), ShortGroupSigError> { + self.verify_given_destructured_params(message, pk, ¶ms.g1, params.g2) + } + + pub fn verify_given_destructured_params( + &self, + message: &E::ScalarField, + pk: impl AsRef, + g1: &E::G1Affine, + g2: E::G2Affine, ) -> Result<(), ShortGroupSigError> { if !self.is_non_zero() { return Err(ShortGroupSigError::ZeroSignature); } // Check e(sig, pk + g2*m) == e(g1, g2) => e(g1, g2) - e(sig, pk + g2*m) == 0 => e(g1, g2) + e(sig, -(pk + g2*m)) == 0 // gm = -g2*m - g2*x - let gm = params.g2 * message.neg() - pk.0; + let gm = g2 * message.neg() - pk.as_ref(); if !E::multi_pairing( - [E::G1Prepared::from(self.0), E::G1Prepared::from(params.g1)], - [E::G2Prepared::from(gm), E::G2Prepared::from(params.g2)], + [E::G1Prepared::from(self.0), E::G1Prepared::from(g1)], + [E::G2Prepared::from(gm), E::G2Prepared::from(g2)], ) .is_zero() { @@ -139,16 +158,26 @@ impl SignatureG1 { pub fn verify_given_sig_params_with_pairing( &self, message: &E::ScalarField, - pk: &PublicKeyG2, + pk: impl AsRef, params: &SignatureParamsWithPairing, + 
) -> Result<(), ShortGroupSigError> { + self.verify_given_destructured_params_with_pairing(message, pk, params.g2, params.g1g2) + } + + pub fn verify_given_destructured_params_with_pairing( + &self, + message: &E::ScalarField, + pk: impl AsRef, + g2: E::G2Affine, + g1g2: PairingOutput, ) -> Result<(), ShortGroupSigError> { if !self.is_non_zero() { return Err(ShortGroupSigError::ZeroSignature); } // Check e(sig, pk + g2*m) == e(g1, g2) // gm = g2*m + g2*x - let gm = params.g2 * message + pk.0; - if E::pairing(E::G1Prepared::from(self.0), E::G2Prepared::from(gm)) != params.g1g2 { + let gm = g2 * message + pk.as_ref(); + if E::pairing(E::G1Prepared::from(self.0), E::G2Prepared::from(gm)) != g1g2 { return Err(ShortGroupSigError::InvalidSignature); } Ok(()) diff --git a/short_group_sig/src/weak_bb_sig_pok_cdh.rs b/short_group_sig/src/weak_bb_sig_pok_cdh.rs index e86b7875..7c00be22 100644 --- a/short_group_sig/src/weak_bb_sig_pok_cdh.rs +++ b/short_group_sig/src/weak_bb_sig_pok_cdh.rs @@ -63,12 +63,12 @@ impl PoKOfSignatureG1Protocol { g1: &E::G1Affine, ) -> Self { let sig_randomizer = E::ScalarField::rand(rng); - let sc_blinding = E::ScalarField::rand(rng); + let sig_randomizer_blinding = E::ScalarField::rand(rng); let msg_blinding = blinding.unwrap_or_else(|| E::ScalarField::rand(rng)); Self::init_with_given_randomness( sig_randomizer, msg_blinding, - sc_blinding, + sig_randomizer_blinding, signature, message, g1, @@ -79,7 +79,7 @@ impl PoKOfSignatureG1Protocol { pub fn init_with_given_randomness( sig_randomizer: E::ScalarField, msg_blinding: E::ScalarField, - sc_blinding: E::ScalarField, + sig_randomizer_blinding: E::ScalarField, signature: impl AsRef, message: E::ScalarField, g1: &E::G1Affine, @@ -92,7 +92,7 @@ impl PoKOfSignatureG1Protocol { let A_bar = g1.mul_bigint(sig_r) + A_prime_neg * message; let sc = PokTwoDiscreteLogsProtocol::init( sig_randomizer, - sc_blinding, + sig_randomizer_blinding, g1, message, msg_blinding, diff --git a/syra/README.md b/syra/README.md index fa01f2ce..fbd77145 100644 --- a/syra/README.md +++ b/syra/README.md @@ -7,41 +7,6 @@ unique user attribute and user uses this "signature" to create the pseudonym. Also implements the threshold issuance of SyRA signatures -### A more efficient protocol generating pseudonym and corresponding proof of knowledge - -This significantly reduces the number of pairings done by both the user and verifier as well as reducing the -storage and computation cost of user and issuer as the "user secret key" (issuer's signature) is a single group -element in group G1. _But this doesn't have a security proof yet and thus isn't implemented._ - -- Setup parameters: `g ∈ G1, g_hat ∈ G2` -- Issuer keys: secret `sk ∈ Z_p`, public `ivk_hat ∈ G2, ivk_hat = g_hat*sk` -- User gets from issuer a signature `usk ∈ G1, usk = g*{1/(sk+s)}` where `s ∈ Z_p` is the user's identity -- User and verifier hash context to `Z ∈ G2`. - -For the user's signature generation, the objective is that given usk, the user wants to prove 2 relations -1. `T = e(usk, Z)` where `T, Z` are public but usk is only known to the user. -2. User knows a valid `usk` and the `s` in `usk` without revealing `usk` and `usk` satisfies `e(usk, g_hat*s.ivk_hat) == e(g, g_hat)`. -And the user should prove that usk used in relation 1 and 2 are the same. - -Relation 1 can be proved by applying the folklore Schnorr protocol for discrete log to the pairing setting. Eg. i.e. given the prover and verifier both know `(Z, T)` and the prover additionally knows `usk`, prove that `e(usk, Z) = T`. -1. 
Prover chooses a random `R ∈ G1` and computes `K = e(R, Z)` -2. Verifier gives a challenge `c ∈ Z_p`. -3. Computes response `S ∈ G1, S = R + usk*c` and sends `(K, S)` to the verifier. -4. Verifier checks if `e(S, Z) = K + T*c`. This works because `e(S, Z) = e(R + usk*c, Z) = e(R, Z) + e(usk*c, Z) = K + c*e(usk, Z) = K + c*T`. - -`usk` is essentially a weak-BB signature so we can create a proof for relation 2 using the proof of knowledge of weak-BB signature protocol described -in section 2.4 of [this paper](http://library.usc.edu.ph/ACM/SIGSAC%202017/wpes/p123.pdf). Note that there is no pairing computation for prover and -only 1 for verifier (considering a pairing product). - -To prove `usk` is the same in both relations, the user chooses a random `r ∈ Z_p` and creates `V ∈ G1, V = usk*r` and `T' = e(V, Z) = T*r` and -proves knowledge of `r` in `T' = T*r`. Note that `V, r` are the same as the ones created in the proof of relation 2 and the user can prove that -`r` is the same. Also, the prover doesn't send `T'`, the verifier creates using `V` and `Z` as `T' = e(V, Z)`. - -Following is the detailed protocol for user's signature generation -1. User follows the above protocol for Relation 1 (verifier's challenge is generated through Fiat Shamir) and gets `T = e(usk, Z)` and proof `pi_1 = (K, S)`. -2. User picks a random `r ∈ Z_p`, creates `V, V' ∈ G1` as `V = usk*r, V' = V*-s * g*r, T' = T*r`. -3. User creates a proof `pi_2 = SPK{(s, r) : V' = V*-s * g*r ∧ T' = T*r}`. -4. User sends proof `pi_1, T, pi_2, V, V'` to the verifier. -5. Verifier creates `T' = e(V, Z)`, checks pi_1, pi_2 and `e(V', g_hat) == e(V, ivk_hat)`. +A more efficient protocol generating pseudonym and corresponding proof of knowledge is implemented in the module [pseudonym_alt](./src/pseudonym_alt.rs) diff --git a/syra/src/error.rs b/syra/src/error.rs index 08596562..3602ee99 100644 --- a/syra/src/error.rs +++ b/syra/src/error.rs @@ -1,13 +1,21 @@ use ark_serialize::SerializationError; +use schnorr_pok::error::SchnorrError; use short_group_sig::error::ShortGroupSigError; #[derive(Debug)] pub enum SyraError { InvalidProof, + SchnorrError(SchnorrError), ShortGroupSigError(ShortGroupSigError), Serialization(SerializationError), } +impl From for SyraError { + fn from(e: SchnorrError) -> Self { + Self::SchnorrError(e) + } +} + impl From for SyraError { fn from(e: ShortGroupSigError) -> Self { Self::ShortGroupSigError(e) diff --git a/syra/src/lib.rs b/syra/src/lib.rs index 87ad84c5..eb76e135 100644 --- a/syra/src/lib.rs +++ b/syra/src/lib.rs @@ -8,46 +8,11 @@ //! //! Also implements the threshold issuance of SyRA signatures //! -//! ### A more efficient protocol generating pseudonym and corresponding proof of knowledge -//! -//! This significantly reduces the number of pairings done by both the user and verifier as well as reducing the -//! storage and computation cost of user and issuer as the "user secret key" (issuer's signature) is a single group -//! element in group G1. _But this doesn't have a security proof yet and thus isn't implemented._ -//! -//! - Setup parameters: `g ∈ G1, g_hat ∈ G2` -//! - Issuer keys: secret `sk ∈ Z_p`, public `ivk_hat ∈ G2, ivk_hat = g_hat*sk` -//! - User gets from issuer a signature `usk ∈ G1, usk = g*{1/(sk+s)}` where `s ∈ Z_p` is the user's identity -//! - User and verifier hash context to `Z ∈ G2`. -//! -//! For the user's signature generation, the objective is that given usk, the user wants to prove 2 relations -//! 1. 
`T = e(usk, Z)` where `T, Z` are public but usk is only known to the user. -//! 2. User knows a valid `usk` and the `s` in `usk` without revealing `usk` and `usk` satisfies `e(usk, g_hat*s.ivk_hat) == e(g, g_hat)`. -//! And the user should prove that usk used in relation 1 and 2 are the same. -//! -//! Relation 1 can be proved by applying the folklore Schnorr protocol for discrete log to the pairing setting. Eg. i.e. given the prover and verifier both know `(Z, T)` and the prover additionally knows `usk`, prove that `e(usk, Z) = T`. -//! 1. Prover chooses a random `R ∈ G1` and computes `K = e(R, Z)` -//! 2. Verifier gives a challenge `c ∈ Z_p`. -//! 3. Computes response `S ∈ G1, S = R + usk*c` and sends `(K, S)` to the verifier. -//! 4. Verifier checks if `e(S, Z) = K + T*c`. This works because `e(S, Z) = e(R + usk*c, Z) = e(R, Z) + e(usk*c, Z) = K + c*e(usk, Z) = K + c*T`. -//! -//! `usk` is essentially a weak-BB signature so we can create a proof for relation 2 using the proof of knowledge of weak-BB signature protocol described -//! in section 2.4 of [this paper](http://library.usc.edu.ph/ACM/SIGSAC%202017/wpes/p123.pdf). Note that there is no pairing computation for prover and -//! only 1 for verifier (considering a pairing product). -//! -//! To prove `usk` is the same in both relations, the user chooses a random `r ∈ Z_p` and creates `V ∈ G1, V = usk*r` and `T' = e(V, Z) = T*r` and -//! proves knowledge of `r` in `T' = T*r`. Note that `V, r` are the same as the ones created in the proof of relation 2 and the user can prove that -//! `r` is the same. Also, the prover doesn't send `T'`, the verifier creates using `V` and `Z` as `T' = e(V, Z)`. -//! -//! Following is the detailed protocol for user's signature generation -//! 1. User follows the above protocol for Relation 1 (verifier's challenge is generated through Fiat Shamir) and gets `T = e(usk, Z)` and proof `pi_1 = (K, S)`. -//! 2. User picks a random `r ∈ Z_p`, creates `V, V' ∈ G1` as `V = usk*r, V' = V*-s * g*r, T' = T*r`. -//! 3. User creates a proof `pi_2 = SPK{(s, r) : V' = V*-s * g*r ∧ T' = T*r}`. -//! 4. User sends proof `pi_1, T, pi_2, V, V'` to the verifier. -//! 5. Verifier creates `T' = e(V, Z)`, checks pi_1, pi_2 and `e(V', g_hat) == e(V, ivk_hat)`. -//! +//! A more efficient protocol generating pseudonym and corresponding proof of knowledge is implemented in the module [pseudonym_alt](./src/pseudonym_alt.rs) pub mod error; pub mod pseudonym; +pub mod pseudonym_alt; pub mod setup; -mod threshold_issuance; +pub mod threshold_issuance; pub mod vrf; diff --git a/syra/src/pseudonym.rs b/syra/src/pseudonym.rs index 7923fc6e..074f5c0e 100644 --- a/syra/src/pseudonym.rs +++ b/syra/src/pseudonym.rs @@ -1,6 +1,6 @@ //! Generate a pseudonym. This is generated by following the protocol described in section 4 of the paper. //! -//! Notation +//! Notation: Multiplicative notation is used //! ```docs //! T = e(Z, usk_hat) //! A = e(Z, W_hat) @@ -21,28 +21,28 @@ //! with the ones in `H`. Following is the detailed protocol with P and V denoting the prover and verifier respectively. //! //! 1. P chooses random `r_1, r_2` and computes `K_1 = F^s.G^r_1` and `K_2 = F^{β.s}.G^r_2`. Now `K_2 = E^s.G^{r_2 - α.s}`. Let `r3 = r2 - α*s` -//! 2. Now P starts executing the Schnorr protocol. It chooses random θ_1, θ_2, θ_3, R_1, R_2, R_3, R_4. +//! 2. Now P starts executing the Schnorr protocol. It chooses random `θ_1, θ_2, θ_3, R_1, R_2, R_3, R_4`. //! 3. P computes -//! T_1 = F^{θ_1}.G^{R_1} (for K_1) -//! T_2 = F^{θ_2}.G^{R_2} (for E) -//! 
T_3 = F^{θ_3}.G^{R_3} (for K_2) -//! T_4 = E^{θ_1}.G^{R_4} (for K_2) -//! T_5 = I^{θ_2}.F^{θ_3}.J^{θ_1} (for H) -//! 4. V gives the challenge c. -//! 5. P computes the following (s_i, t_i) and sends to V along with T_i. -//! s_1 = θ_1 + c.s -//! s_2 = θ_2 + c.β -//! s_3 = θ_3 + c.β.s -//! t_1 = R_1 + c.r_1 -//! t_2 = R_2 + c.α -//! t_3 = R_3 + c.r_2 -//! t_4 = R_4 + c(r_2 - α.s) +//! `T_1 = F^{θ_1}.G^{R_1} (for K_1)` +//! `T_2 = F^{θ_2}.G^{R_2} (for E)` +//! `T_3 = F^{θ_3}.G^{R_3} (for K_2)` +//! `T_4 = E^{θ_1}.G^{R_4} (for K_2)` +//! `T_5 = I^{θ_2}.F^{θ_3}.J^{θ_1} (for H)` +//! 4. V gives the challenge `c`. +//! 5. P computes the following `(s_i, t_i)` and sends to V along with `T_i`. +//! `s_1 = θ_1 + c.s` +//! `s_2 = θ_2 + c.β` +//! `s_3 = θ_3 + c.β.s` +//! `t_1 = R_1 + c.r_1` +//! `t_2 = R_2 + c.α` +//! `t_3 = R_3 + c.r_2` +//! `t_4 = R_4 + c(r_2 - α.s)` //! 6. V checks the following -//! T_1 == K_1^-c.F^{s_1}.G^{t_1} -//! T_2 == E^-c.F^{s_2}.G^{t_2} -//! T_3 == K_2^-c.F^{s_3}.G^{t_3} -//! T_4 == K_2^-c.E^{s_1}.G^{t_4} -//! T_5 == H^-c.I^{s_2}.F^{s_3}.J^{s_1} +//! `T_1 == K_1^-c.F^{s_1}.G^{t_1}` +//! `T_2 == E^-c.F^{s_2}.G^{t_2}` +//! `T_3 == K_2^-c.F^{s_3}.G^{t_3}` +//! `T_4 == K_2^-c.E^{s_1}.G^{t_4}` +//! `T_5 == H^-c.I^{s_2}.F^{s_3}.J^{s_1}` //! //! The implementation uses precomputation but if pre-computation is not done then some of these can utilize multi-pairings //! which are more efficient. But using precomputation is faster. @@ -61,7 +61,7 @@ use dock_crypto_utils::elgamal::Ciphertext as ElgamalCiphertext; use serde_with::serde_as; use zeroize::{Zeroize, ZeroizeOnDrop}; -/// Protocol to generate a pseudonym and the proof of correctness of it. +/// Protocol to generate a pseudonym and its proof of correctness. #[derive( Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop, CanonicalSerialize, CanonicalDeserialize, )] @@ -436,17 +436,21 @@ mod tests { // Signer creates user secret key let user_id = compute_random_oracle_challenge::(b"low entropy user-id"); + + let start = Instant::now(); let usk = UserSecretKey::new(user_id, &isk, params.clone()); + println!("Time to create user secret key {:?}", start.elapsed()); + + let start = Instant::now(); usk.verify(user_id, &ipk, params.clone()).unwrap(); + println!("Time to verify user secret key {:?}", start.elapsed()); // Verifier gives message and context to user let context = b"test-context"; let msg = b"test-message"; // Generate Z from context - let mut Z_bytes = vec![]; - Z_bytes.extend_from_slice(context); - let Z = affine_group_elem_from_try_and_incr::(&Z_bytes); + let Z = affine_group_elem_from_try_and_incr::(context); // User generates a pseudonym let start = Instant::now(); @@ -468,6 +472,7 @@ mod tests { let challenge_prover = compute_random_oracle_challenge::(&chal_bytes); let proof = protocol.gen_proof(&challenge_prover); println!("Time to create proof {:?}", start.elapsed()); + println!("Size of proof {} bytes", proof.compressed_size()); // Verifier checks the correctness of the pseudonym let start = Instant::now(); diff --git a/syra/src/pseudonym_alt.rs b/syra/src/pseudonym_alt.rs new file mode 100644 index 00000000..1f86e5e2 --- /dev/null +++ b/syra/src/pseudonym_alt.rs @@ -0,0 +1,335 @@ +//! A more efficient protocol generating pseudonym and corresponding proof of knowledge +//! +//! This significantly reduces the number of pairings done by both the user and verifier as well as reducing the +//! storage and computation cost of user and issuer as the "user secret key" (issuer's signature) is a single group +//! 
element in group G1. **But this doesn't have a security proof yet.** +//! +//! - Setup parameters: `g ∈ G1, g_hat ∈ G2` +//! - Issuer keys: secret `sk ∈ Z_p`, public `ivk_hat ∈ G2, ivk_hat = g_hat*sk` +//! - User gets from issuer a weak-BB signature `usk ∈ G1, usk = g*{1/(sk+s)}` where `s ∈ Z_p` is the user's identity +//! - User and verifier hash context to `Z ∈ G2`. +//! +//! For the user's signature generation, the objective is that given usk, the user wants to prove 2 relations +//! 1. `T = e(usk, Z)` where `T, Z` are public but usk is only known to the user. +//! 2. User knows a valid `usk` and the `s` in `usk` without revealing `usk` and `usk` satisfies `e(usk, g_hat*s + ivk_hat) == e(g, g_hat)`. +//! And the user should prove that usk used in relation 1 and 2 are the same. +//! +//! Relation 1 can be proved by applying the folklore Schnorr protocol for discrete log to the pairing setting. Eg. i.e. given the prover and +//! verifier both know `(Z, T)` and the prover additionally knows `usk`, prove that `e(usk, Z) = T`. +//! 1. Prover chooses a random `R ∈ G1` and computes `K = e(R, Z)` +//! 2. Verifier gives a challenge `c ∈ Z_p`. +//! 3. Computes response `S ∈ G1, S = R + usk*c` and sends `(K, S)` to the verifier. +//! 4. Verifier checks if `e(S, Z) = K + T*c`. This works because `e(S, Z) = e(R + usk*c, Z) = e(R, Z) + e(usk*c, Z) = K + c*e(usk, Z) = K + c*T`. +//! +//! `usk` is essentially a weak-BB signature so we can create a proof for relation 2 using the proof of knowledge of weak-BB signature protocol described +//! in section 2.4 of [this paper](http://library.usc.edu.ph/ACM/SIGSAC%202017/wpes/p123.pdf). Note that there is no pairing computation for prover and +//! only 1 for verifier (considering a pairing product). +//! +//! To prove `usk` is the same in both relations, the user chooses a random `r ∈ Z_p` and creates `V ∈ G1, V = usk*r` and `T' = e(V, Z) = T*r` and +//! proves knowledge of `r` in `T' = T*r`. Note that `V, r` are the same as the ones created in the proof of relation 2 and the user can prove that +//! `r` is the same. Also, the prover doesn't send `T'`, the verifier creates using `V` and `Z` as `T' = e(V, Z)`. +//! The idea is that since the user is already proving that `V` is randomized `usk`, the same `V` can also produce a randomized +//! pseudonym `T'` (similar to how the original weak-BB signature `usk` produced the original pseudonym `T`) and +//! user knows that randomizer `r`. +//! +//! +//! Following is the detailed protocol for user's signature generation +//! 1. User follows the above protocol for Relation 1 (verifier's challenge is generated through Fiat Shamir) and gets `T = e(usk, Z)` and proof `pi_1 = (K, S)`. +//! 2. User picks a random `r ∈ Z_p`, creates `V, V' ∈ G1` as `V = usk*r, V' = V*-s * g*r, T' = T*r`. +//! 3. User creates a proof `pi_2 = SPK{(s, r) : V' = V*-s * g*r ∧ T' = T*r}`. +//! 4. User sends proof `pi_1, T, pi_2, V, V'` to the verifier. +//! 5. Verifier creates `T' = e(V, Z)`, checks `pi_1, pi_2` and `e(V', g_hat) == e(V, ivk_hat)`. +//! 
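For readers skimming the patch, the check in Relation 1 above can be exercised in isolation. Below is a minimal, self-contained sketch — not code from this crate — assuming only the arkworks crates this workspace already uses (`ark-bls12-381`, `ark-ec`, `ark-ff`, `ark-std`); all variable names are illustrative. It confirms that the verifier's check `e(S, Z) = K + T*c` (additive notation in the target group) passes exactly when `S = R + usk*c`:

```rust
// Sketch of the pairing-based Schnorr check from Relation 1 (assumes arkworks crates).
use ark_bls12_381::{Bls12_381, Fr, G1Projective, G2Projective};
use ark_ec::pairing::Pairing;
use ark_ff::Field;
use ark_std::{
    rand::{rngs::StdRng, SeedableRng},
    UniformRand,
};

fn main() {
    let mut rng = StdRng::seed_from_u64(0);
    let g = G1Projective::rand(&mut rng); // setup parameter g ∈ G1
    let z = G2Projective::rand(&mut rng); // Z ∈ G2, hashed from the context in the protocol
    let (sk, s) = (Fr::rand(&mut rng), Fr::rand(&mut rng)); // issuer secret key, user id

    // usk = g^{1/(sk+s)}, the weak-BB signature the issuer gives the user
    let usk = g * (sk + s).inverse().unwrap();
    // Pseudonym T = e(usk, Z)
    let t = Bls12_381::pairing(usk, z);

    // Prover: K = e(R, Z) for a random R, response S = R + usk*c
    let r = G1Projective::rand(&mut rng);
    let k = Bls12_381::pairing(r, z);
    let c = Fr::rand(&mut rng); // challenge; derived via Fiat-Shamir in the real protocol
    let s_resp = r + usk * c;

    // Verifier: e(S, Z) == K + T*c
    assert_eq!(Bls12_381::pairing(s_resp, z), k + t * c);
}
```

In the implementation this check is not hand-rolled; as the `use` statements below show, it is delegated to `PoKG1DiscreteLogInPairingProtocol`/`PoKG1DiscreteLogInPairing` from the `schnorr_pok` crate.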
+ +use crate::{ + error::SyraError, + setup::{IssuerSecretKey, PreparedSetupParams, SetupParams}, +}; +use ark_ec::pairing::{Pairing, PairingOutput}; +use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; +use ark_std::{io::Write, rand::RngCore, vec::Vec, UniformRand}; +use schnorr_pok::discrete_log_pairing::{ + PoKG1DiscreteLogInPairing, PoKG1DiscreteLogInPairingProtocol, +}; +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; +use short_group_sig::{ + weak_bb_sig::{PublicKeyG2, SignatureG1}, + weak_bb_sig_pok_cdh::{PoKOfSignatureG1, PoKOfSignatureG1Protocol}, +}; +use zeroize::{Zeroize, ZeroizeOnDrop}; + +/// Issuer's public key +#[serde_as] +#[derive( + Clone, PartialEq, Eq, Debug, CanonicalSerialize, CanonicalDeserialize, Serialize, Deserialize, +)] +pub struct IssuerPublicKey(pub PublicKeyG2); + +/// User's secret key +#[serde_as] +#[derive( + Clone, + PartialEq, + Eq, + Debug, + CanonicalSerialize, + CanonicalDeserialize, + Serialize, + Deserialize, + Zeroize, + ZeroizeOnDrop, +)] +pub struct UserSecretKey(pub SignatureG1); + +impl IssuerPublicKey { + pub fn new(sk: &IssuerSecretKey, params: &SetupParams) -> Self { + Self(PublicKeyG2((params.g_hat * sk.0).into())) + } +} + +impl AsRef for IssuerPublicKey { + fn as_ref(&self) -> &E::G2Affine { + &self.0 .0 + } +} + +impl UserSecretKey { + pub fn new( + user_id: &E::ScalarField, + issuer_sk: &IssuerSecretKey, + params: &SetupParams, + ) -> Self { + Self(SignatureG1::new(user_id, issuer_sk, params)) + } + + pub fn verify( + &self, + user_id: E::ScalarField, + issuer_pk: &IssuerPublicKey, + params: impl Into>, + ) -> Result<(), SyraError> { + let params = params.into(); + self.0 + .verify_given_destructured_params_with_pairing( + &user_id, + &issuer_pk.0, + params.g_hat, + params.pairing, + ) + .map_err(|e| e.into()) + } +} + +impl AsRef for UserSecretKey { + fn as_ref(&self) -> &E::G1Affine { + &self.0 .0 + } +} + +/// Protocol to generate a pseudonym and its proof of correctness. +#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)] +pub struct PseudonymGenProtocol { + pub pok_usk: PoKG1DiscreteLogInPairingProtocol, + pub pok_usk_bb_sig: PoKOfSignatureG1Protocol, + /// Pseudonym + #[zeroize(skip)] + pub T: PairingOutput, + /// `T*r` + #[zeroize(skip)] + pub T_prime: PairingOutput, + /// For proving knowledge of `r` in `T' = T * r`, prover picks blinding `l` and creates `T*l` as the first + /// step of Schnorr protocol. 
This `l` matches the blinding used in proof of knowledge of weak-BB sig + #[zeroize(skip)] + pub J: PairingOutput, +} + +/// This contains the pseudonym as well its proof of correctness +#[serde_as] +#[derive(Clone, PartialEq, Eq, Debug, CanonicalSerialize, CanonicalDeserialize)] +pub struct PseudonymProof { + pub pok_usk: PoKG1DiscreteLogInPairing, + pub pok_usk_bb_sig: PoKOfSignatureG1, + /// Pseudonym + pub T: PairingOutput, + pub J: PairingOutput, +} + +impl PseudonymGenProtocol { + pub fn init( + rng: &mut R, + Z: E::G2Affine, + s: E::ScalarField, + blinding: Option, + user_sk: UserSecretKey, + params: impl Into>, + ) -> Self { + let params = params.into(); + let T = E::pairing(E::G1Prepared::from(user_sk.0 .0), E::G2Prepared::from(Z)); + let r = E::ScalarField::rand(rng); + let r_blinding = E::ScalarField::rand(rng); + let msg_blinding = blinding.unwrap_or_else(|| E::ScalarField::rand(rng)); + let pok_usk = PoKG1DiscreteLogInPairingProtocol::init( + user_sk.0 .0.clone(), + E::G1Affine::rand(rng), + &Z, + ); + let pok_usk_bb_sig = PoKOfSignatureG1Protocol::init_with_given_randomness( + r, + msg_blinding, + r_blinding, + user_sk, + s, + ¶ms.g, + ); + let T_prime = T * r; + let J = T * r_blinding; + Self { + pok_usk, + pok_usk_bb_sig, + T, + T_prime, + J, + } + } + + pub fn challenge_contribution( + &self, + Z: &E::G2Affine, + issuer_pk: &IssuerPublicKey, + g: &E::G1Affine, + mut writer: W, + ) -> Result<(), SyraError> { + issuer_pk.serialize_compressed(&mut writer)?; + self.J.serialize_compressed(&mut writer)?; + self.pok_usk + .challenge_contribution(&Z, &self.T, &mut writer)?; + self.pok_usk_bb_sig.challenge_contribution(g, &mut writer)?; + Ok(()) + } + + pub fn gen_proof(self, challenge: &E::ScalarField) -> PseudonymProof { + let pok_usk = self.pok_usk.clone().gen_proof(challenge); + let pok_usk_bb_sig = self.pok_usk_bb_sig.clone().gen_proof(challenge); + PseudonymProof { + pok_usk, + pok_usk_bb_sig, + T: self.T, + J: self.J, + } + } +} + +impl PseudonymProof { + pub fn verify( + &self, + challenge: &E::ScalarField, + Z: E::G2Affine, + issuer_pk: &IssuerPublicKey, + params: impl Into>, + ) -> Result<(), SyraError> { + if !self.pok_usk.verify(&self.T, Z, challenge) { + return Err(SyraError::InvalidProof); + } + let T_prime = E::pairing(self.pok_usk_bb_sig.A_prime, Z); + if (T_prime * challenge) + self.J + != self.T * self.pok_usk_bb_sig.sc.as_ref().unwrap().response1 + { + return Err(SyraError::InvalidProof); + } + let params = params.into(); + self.pok_usk_bb_sig + .verify(challenge, issuer_pk.0 .0.clone(), ¶ms.g, params.g_hat) + .map_err(|e| e.into()) + } + + pub fn challenge_contribution( + &self, + Z: &E::G2Affine, + issuer_pk: &IssuerPublicKey, + g: &E::G1Affine, + mut writer: W, + ) -> Result<(), SyraError> { + issuer_pk.serialize_compressed(&mut writer)?; + self.J.serialize_compressed(&mut writer)?; + self.pok_usk + .challenge_contribution(&Z, &self.T, &mut writer)?; + self.pok_usk_bb_sig.challenge_contribution(g, &mut writer)?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ark_bls12_381::{Bls12_381, Fr, G2Affine}; + use ark_std::rand::{prelude::StdRng, SeedableRng}; + use blake2::Blake2b512; + use dock_crypto_utils::hashing_utils::affine_group_elem_from_try_and_incr; + use schnorr_pok::compute_random_oracle_challenge; + use std::time::Instant; + + #[test] + fn pseudonym() { + let mut rng = StdRng::seed_from_u64(0u64); + + let params = SetupParams::::new::(b"test"); + + // Signer's setup + let isk = IssuerSecretKey::new(&mut rng); + let ipk = 
IssuerPublicKey::new(&isk, ¶ms); + + // Signer creates user secret key + let user_id = compute_random_oracle_challenge::(b"low entropy user-id"); + + let start = Instant::now(); + let usk = UserSecretKey::new(&user_id, &isk, ¶ms); + println!("Time to create user secret key {:?}", start.elapsed()); + + let start = Instant::now(); + usk.verify(user_id, &ipk, params.clone()).unwrap(); + println!("Time to verify user secret key {:?}", start.elapsed()); + + // Verifier gives message and context to user + let context = b"test-context"; + let msg = b"test-message"; + + // Generate Z from context + let Z = affine_group_elem_from_try_and_incr::(context); + + // User generates a pseudonym + let start = Instant::now(); + let protocol = PseudonymGenProtocol::init( + &mut rng, + Z.clone(), + user_id.clone(), + None, + usk, + params.clone(), + ); + let mut chal_bytes = vec![]; + protocol + .challenge_contribution(&Z, &ipk, ¶ms.g, &mut chal_bytes) + .unwrap(); + // Add message to the transcript (message contributes to challenge) + chal_bytes.extend_from_slice(msg); + let challenge_prover = compute_random_oracle_challenge::(&chal_bytes); + let proof = protocol.gen_proof(&challenge_prover); + println!("Time to create proof {:?}", start.elapsed()); + println!("Size of proof {} bytes", proof.compressed_size()); + + // Verifier checks the correctness of the pseudonym + let start = Instant::now(); + let mut chal_bytes = vec![]; + proof + .challenge_contribution(&Z, &ipk, ¶ms.g, &mut chal_bytes) + .unwrap(); + // Add message to the transcript (message contributes to challenge) + chal_bytes.extend_from_slice(msg); + let challenge_verifier = compute_random_oracle_challenge::(&chal_bytes); + proof + .verify(&challenge_verifier, Z, &ipk, params.clone()) + .unwrap(); + println!("Time to verify proof {:?}", start.elapsed()); + } +} diff --git a/syra/src/setup.rs b/syra/src/setup.rs index a6144f87..c9f5a636 100644 --- a/syra/src/setup.rs +++ b/syra/src/setup.rs @@ -37,6 +37,12 @@ pub struct PreparedSetupParams { pub pairing: PairingOutput, } +impl AsRef for SetupParams { + fn as_ref(&self) -> &E::G1Affine { + &self.g + } +} + #[serde_as] #[derive( Clone, diff --git a/syra/src/threshold_issuance.rs b/syra/src/threshold_issuance.rs index 38122a2a..fd08d761 100644 --- a/syra/src/threshold_issuance.rs +++ b/syra/src/threshold_issuance.rs @@ -1,18 +1,20 @@ //! Threshold issuance in SyRA. The secret key is shared among signers using Shamir secret sharing and they jointly generate //! SyRA VRF. //! -//! SyRA VRF described in Fig. 4 of the paper is of the form `(g*1/(sk+s), g_hat*1/(sk+s))` where `sk` is the signer's secret key +//! Note: Multiplicative notation is used +//! +//! SyRA VRF described in Fig. 4 of the paper is of the form `(g^1/(sk+s), g_hat^1/(sk+s))` where `sk` is the signer's secret key //! and `s` is the user-id and `g, g_hat` are public parameters. This is similar to a weak-BB signature which has the form -//! `g*1/(sk+s)`. So SyRA VRF is essentially 2 weak-BB signatures. So I use the protocol for threshold weak-BB signature from +//! `g^1/(sk+s)`. So SyRA VRF is essentially 2 weak-BB signatures. So I use the protocol for threshold weak-BB signature from //! the corresponding package. //! //! The high level idea is: //! - The signers jointly compute a random value `r` such that each signer `i` has a share of it as `r_i` such that `r = \sum{r_i}` //! - The signers jointly compute a product of `u = r*(sk+s)` such that each signer `i` has a share of it as `u_i` such that `u = \sum{u_i}` -//! 
- Each signer sends to the user `R_i, R_hat_i, S_i, u_i` to the user where `R_i = g*r_i, R_hat_i = g_hat*r_i, S_i = e(g, g_hat)*r_i`. -//! - User combines these to form `R = \sum{R_i} = g*\sum{r_i} = g*r`, `R_hat = \sum{R_hat_i} = g_hat*\sum{r_i} = g_hat*r`, -//! `S = \sum{S_i} = e(g, g_hat)*\sum{r_i} = e(g, g_hat)*r` and `u = \sum{u_i} = r*(sk+s)`. Now `R * 1/u = g*1/(sk+s)`, -//! `R_hat * 1/u = g_hat*1/(sk+s)` and `S * 1/u = e(g, g_hat)*1/(sk+s)` +//! - Each signer sends `R_i, R_hat_i, S_i, u_i` to the user where `R_i = g^r_i, R_hat_i = g_hat^r_i, S_i = e(g, g_hat)^r_i`. +//! - User combines these to form `R = \prod{R_i} = g^\sum{r_i} = g^r`, `R_hat = \prod{R_hat_i} = g_hat^\sum{r_i} = g_hat^r`, +//! `S = \prod{S_i} = e(g, g_hat)^\sum{r_i} = e(g, g_hat)^r` and `u = \sum{u_i} = r*(sk+s)`. Now `R^1/u = g^1/(sk+s)`, +//! `R_hat^1/u = g_hat^1/(sk+s)` and `S^1/u = e(g, g_hat)^1/(sk+s)` //! - User uses `R, R_hat, S` to verify its secret key as per Fig.4 //! //! The protocol proceeds in 2 phases: @@ -20,9 +22,9 @@ //! 1. **Phase 1**: This is a 2 round protocol, independent of the message `m` and generates randomness, like `r_i` (and other //! blindings to be used in MPC multiplication protocol). //! 2. **Phase 2**: Here the parties run a 2 round MPC multiplication protocol where each party's input is `(r_i, (sk_i + m))` and output -//! is `(g*r_i, g_hat*r_i, e(g, g_hat)*r_i, u_i)` where `u_i` is a share of `r*(sk+m)` such that `\sum{u_i} = r*(sk+m)`. -//! `(g*r_i, g_hat*r_i, e(g, g_hat)*r_i, u_i)` is called the `UserSecretKeyShare` and user can combine -//! these shares from all signers to get `g*1/(sk+m), g_hat*1/(sk+s), e(g, g_hat)*1/(sk+s)` as described above. +//! is `(g^r_i, g_hat^r_i, e(g, g_hat)^r_i, u_i)` where `u_i` is a share of `r*(sk+m)` such that `\sum{u_i} = r*(sk+m)`. +//! `(g^r_i, g_hat^r_i, e(g, g_hat)^r_i, u_i)` is called the `UserSecretKeyShare` and the user can combine +//! these shares from all signers to get `g^1/(sk+m), g_hat^1/(sk+m), e(g, g_hat)^1/(sk+m)` as described above. use crate::{ error::SyraError, @@ -72,13 +74,13 @@ pub struct Phase2<E: Pairing> { pub signer_id: ParticipantId, - /// `g*r_i` + /// `g^r_i` #[serde_as(as = "ArkObjectBytes")] pub R: E::G1Affine, - /// `g_hat*r_i` + /// `g_hat^r_i` #[serde_as(as = "ArkObjectBytes")] pub R_hat: E::G2Affine, - /// `e(g, g_hat)*r_i` + /// `e(g, g_hat)^r_i` #[serde_as(as = "ArkObjectBytes")] pub S: PairingOutput<E>, /// Share of `r*(sk+s)` where `s` is user-id @@ -174,9 +176,9 @@ impl<E: Pairing> UserSecretKeyShare<E> { let mut sum_S = PairingOutput::<E>::zero(); let mut sum_u = E::ScalarField::zero(); // u = \sum_i{share_i.u} = r*(sk + s) - // R = \sum_i{share_i.R} / u = g * 1/(sk + s) - // R_hat = \sum_i{share_i.R_hat} / u = g_hat * 1/(sk + s) - // S = \sum_i{share_i.S} / u = e(g, h_hat) * 1/(sk + s) + // R = \prod_i{share_i.R} / u = g^1/(sk + s) + // R_hat = \prod_i{share_i.R_hat} / u = g_hat^1/(sk + s) + // S = \prod_i{share_i.S} / u = e(g, g_hat)^1/(sk + s) for share in shares { sum_R += share.R; sum_R_hat += share.R_hat; @@ -199,10 +201,7 @@ mod tests { use crate::setup::{IssuerPublicKey, IssuerSecretKey, SetupParams}; use ark_bls12_381::{Bls12_381, Fr}; - use ark_std::{ - rand::{rngs::StdRng, SeedableRng}, - UniformRand, - }; + use ark_std::rand::{rngs::StdRng, SeedableRng}; use blake2::Blake2b512; use schnorr_pok::compute_random_oracle_challenge; use secret_sharing_and_dkg::shamir_ss::deal_random_secret; @@ -374,10 +373,6 @@ mod tests { .collect::<Vec<_>>(); // Public key created by the trusted party using the secret key directly. 
In practice, this will be a result of a DKG let threshold_ipk = IssuerPublicKey::new(&mut rng, &IssuerSecretKey(sk), &params); - let ipks = isk_shares - .iter() - .map(|s| IssuerPublicKey::new(&mut rng, s, &params)) - .collect::<Vec<_>>(); // The signers run OT protocol instances. This is also a one time setup. let base_ot_outputs = do_pairwise_base_ot::( diff --git a/utils/src/solve_discrete_log.rs b/utils/src/solve_discrete_log.rs index 75eb3ff1..70396257 100644 --- a/utils/src/solve_discrete_log.rs +++ b/utils/src/solve_discrete_log.rs @@ -23,7 +23,7 @@ pub fn solve_discrete_log_brute_force( None } -/// Solve discrete log using Baby Step Giant Step as described in section 2 of https://eprint.iacr.org/2015/605 +/// Solve discrete log using Baby Step Giant Step as described in section 2 of <https://eprint.iacr.org/2015/605> /// `max` is the maximum value of the discrete log and this returns `x` such that `1 <= x <= max` and `base * x = target` /// if such `x` exists, else return None. pub fn solve_discrete_log_bsgs( @@ -35,7 +35,7 @@ pub fn solve_discrete_log_bsgs( solve_discrete_log_bsgs_inner(m, m, base, target) } -/// Solve discrete log using Baby Step Giant Step with worse worst-case performance but better average case performance as described in section 2 of https://eprint.iacr.org/2015/605. +/// Solve discrete log using Baby Step Giant Step with worse worst-case performance but better average case performance as described in section 2 of <https://eprint.iacr.org/2015/605>. /// `max` is the maximum value of the discrete log and this returns `x` such that `1 <= x <= max` and `base * x = target` /// if such `x` exists, else return None. pub fn solve_discrete_log_bsgs_alt( diff --git a/vb_accumulator/src/threshold/mod.rs b/vb_accumulator/src/threshold/mod.rs index 04c8bb27..51e0d738 100644 --- a/vb_accumulator/src/threshold/mod.rs +++ b/vb_accumulator/src/threshold/mod.rs @@ -1,280 +1,22 @@ //! Accumulator update, witness generation and updated witness generation in a threshold setting, i.e. where the //! accumulator secret key `alpha` is split among many accumulator managers using Shamir secret sharing. The general idea is: +//! //! 1. Accumulator value post deletion: Say the current accumulator value is `V` and the deleted element is `y`, //! then each manager creates shares `R_i = r_i * V` and `u_i = < share of r_i * (y + l_i * alpha_i)>` and sends to the user who //! then computes `\sum_i{V_i} * 1 / \sum_i{u_i}` to get `V * 1/(y + alpha)`. This also gives the membership witness of `y`. +//! //! 2. Witness generation: Say the current accumulator value is `V` and the user wants witness of `y` but does not want to //! reveal `y` to any manager. It gives shares of `y` to the managers such that each manager has `y_i` and `\sum_i{l_i * y_i} = y`. //! Now each manager shares `R_i = r_i * V` and `u_i = < share of r_i * l_i * (y_i + alpha_i)>` and sends to the user who //! then computes `\sum_i{V_i} * 1 / \sum_i{u_i}` to get `V * 1/(y + alpha)`. But here the user also needs to prove to each //! manager that share `y_i` is a valid share of `y` and this `y` is a member of the accumulator `V`. 
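// Illustrative sketch (editor's addition, not part of the patch): the combine-and-invert step used
// both by the threshold SyRA VRF above and by the threshold accumulator update described in this
// module comment. Managers contribute R_i = r_i * V and additive shares u_i with
// \sum u_i = r*(y + alpha); the user computes (\sum R_i) * 1/(\sum u_i) = V * 1/(y + alpha).
// Written against arkworks (ark-bls12-381 / ark-ff); all names are local to this sketch and the
// MPC that actually produces the u_i shares is elided.
#[cfg(test)]
mod combine_and_invert_sketch {
    use ark_bls12_381::{Fr, G1Projective};
    use ark_ff::Field;
    use ark_std::UniformRand;

    #[test]
    fn combine_shares() {
        let mut rng = ark_std::test_rng();
        let alpha = Fr::rand(&mut rng); // secret key, Shamir-shared among managers in the real protocol
        let y = Fr::rand(&mut rng); // deleted element (or user-id in the VRF case)
        let v = G1Projective::rand(&mut rng); // current accumulator value V (or the base g)

        // Each of the 3 managers holds a random r_i; r = \sum r_i.
        let r: Vec<Fr> = (0..3).map(|_| Fr::rand(&mut rng)).collect();
        let r_total: Fr = r.iter().copied().sum();
        // Stand-in for the MPC multiplication output: only \sum u_i = r*(y + alpha) matters here.
        let u_total = r_total * (y + alpha);

        // User combines R = \sum R_i = r*V and multiplies by 1/u to get V * 1/(y + alpha).
        let big_r: G1Projective = r.iter().map(|r_i| v * *r_i).sum();
        let combined = big_r * u_total.inverse().unwrap();
        assert_eq!(combined, v * (y + alpha).inverse().unwrap());
    }
}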
-// TODO: Use code threshold weak-BB crate and remove this duplication - -use ark_ec::{AffineRepr, CurveGroup}; -use ark_ff::{Field, PrimeField, Zero}; -use ark_serialize::{CanonicalDeserialize, CanonicalSerialize}; -use ark_std::{ - collections::{BTreeMap, BTreeSet}, - rand::RngCore, - vec::Vec, -}; -use digest::DynDigest; -use oblivious_transfer_protocols::{ - cointoss::Commitments, error::OTError, - ot_based_multiplication::batch_mul_multi_party::ParticipantOutput as MultOut, zero_sharing, - ParticipantId, -}; -use secret_sharing_and_dkg::error::SSError; - -use crate::error::VBAccumulatorError; - -/// Share created by a manager when `V * 1/ (y + alpha)` needs to computed and each manager knows `y` but -/// only a share of `alpha` -#[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct ShareOfKnownMember { - pub id: ParticipantId, - pub u: G::ScalarField, - pub R: G, -} - -/// Share created by a manager when `V * 1/ (y + alpha)` needs to computed and no manager knows `y` but -/// only a share of `y` and `alpha` -#[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct ShareOfSharedMember { - pub id: ParticipantId, - pub u: G::ScalarField, - pub R: G, -} - -#[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Phase1 { - pub id: ParticipantId, - pub r: F, - /// Protocols to generate shares of 0s. - pub zero_sharing_protocol: zero_sharing::Party, -} - -#[derive(Clone, Debug, PartialEq, CanonicalSerialize, CanonicalDeserialize)] -pub struct Phase1Output { - pub id: ParticipantId, - pub r: F, - pub masked_signing_key_shares: F, - pub masked_rs: F, - pub others: Vec, -} - -impl Phase1 { - pub fn get_comm_shares_and_salts_for_zero_sharing_protocol_with_other( - &self, - other_id: &ParticipantId, - ) -> Vec<(F, [u8; SALT_SIZE])> { - // TODO: Remove unwrap - self.zero_sharing_protocol - .cointoss_protocols - .get(other_id) - .unwrap() - .own_shares_and_salts - .clone() - } - - pub fn receive_commitment( - &mut self, - sender_id: ParticipantId, - comm_zero_share: Commitments, - ) -> Result<(), VBAccumulatorError> { - self.zero_sharing_protocol - .receive_commitment(sender_id, comm_zero_share)?; - Ok(()) - } - - pub fn receive_shares( - &mut self, - sender_id: ParticipantId, - zero_shares: Vec<(F, [u8; SALT_SIZE])>, - ) -> Result<(), VBAccumulatorError> { - self.zero_sharing_protocol - .receive_shares(sender_id, zero_shares)?; - Ok(()) - } - - pub fn compute_randomness_and_arguments_for_multiplication( - self, - signing_key: &F, - ) -> Result<(Vec, F, F), VBAccumulatorError> { - let others = self - .zero_sharing_protocol - .cointoss_protocols - .keys() - .map(|p| *p) - .collect::>(); - let zero_shares = self.zero_sharing_protocol.compute_zero_shares::()?; - let (masked_signing_key_share, masked_r) = compute_masked_arguments_to_multiply( - signing_key, - self.r, - zero_shares, - self.id, - &others, - )?; - Ok((others, masked_signing_key_share, masked_r)) - } - - pub fn ready_to_compute_randomness_and_arguments_for_multiplication(&self) -> bool { - self.zero_sharing_protocol - .has_shares_from_all_who_committed() - } -} - -impl Phase1 { - pub fn init( - rng: &mut R, - id: ParticipantId, - others: BTreeSet, - protocol_id: Vec, - ) -> Result<(Self, BTreeMap), VBAccumulatorError> { - if others.contains(&id) { - let e = OTError::ParticipantCannotBePresentInOthers(id); - return Err(VBAccumulatorError::OTError(e)); - } - let r = F::rand(rng); - let (zero_sharing_protocol, comm_zero_share) = - 
zero_sharing::Party::init(rng, id, 2, others, protocol_id); - Ok(( - Self { - id, - r, - zero_sharing_protocol, - }, - comm_zero_share, - )) - } - - pub fn finish( - self, - signing_key: &F, - ) -> Result, VBAccumulatorError> { - // TODO: Ensure every one has participated in both protocols - let id = self.id; - let r = self.r.clone(); - let (others, masked_signing_key_share, masked_r) = - self.compute_randomness_and_arguments_for_multiplication::(signing_key)?; - Ok(Phase1Output { - id, - r, - masked_signing_key_shares: masked_signing_key_share, - masked_rs: masked_r, - others, - }) - } -} - -impl ShareOfKnownMember { - pub fn new( - y: &G::ScalarField, - accum: &G, - phase1: &Phase1Output, - phase2: &MultOut, - ) -> Result { - let (R, u) = Self::compute_R_and_u( - accum, - y, - &phase1.r, - &phase1.masked_rs, - &phase1.masked_signing_key_shares, - phase2, - ); - Ok(Self { - id: phase1.id, - u, - R, - }) - } - - pub fn aggregate(shares: Vec) -> G { - let mut sum_R = G::Group::zero(); - let mut sum_u = G::ScalarField::zero(); - for share in shares.into_iter() { - sum_u += share.u; - sum_R += share.R; - } - (sum_R * sum_u.inverse().unwrap()).into_affine() - } - - fn compute_R_and_u( - base: &G, - y: &G::ScalarField, - r: &G::ScalarField, - masked_r: &G::ScalarField, - masked_signing_key_share: &G::ScalarField, - phase2: &MultOut, - ) -> (G, G::ScalarField) { - let R = base.mul(r).into_affine(); - let u = *masked_r * (*y + masked_signing_key_share) + phase2.compute_u(0); - (R, u) - } -} - -impl ShareOfSharedMember { - pub fn new( - accum: &G, - phase1: &Phase1Output, - phase2: &MultOut, - ) -> Result { - let (R, u) = Self::compute_R_and_u( - accum, - &phase1.r, - &phase1.masked_rs, - &phase1.masked_signing_key_shares, - phase2, - ); - Ok(Self { - id: phase1.id, - u, - R, - }) - } - - pub fn aggregate(shares: Vec) -> G { - let mut sum_R = G::Group::zero(); - let mut sum_u = G::ScalarField::zero(); - for share in shares.into_iter() { - sum_u += share.u; - sum_R += share.R; - } - (sum_R * sum_u.inverse().unwrap()).into_affine() - } - - fn compute_R_and_u( - base: &G, - r: &G::ScalarField, - masked_r: &G::ScalarField, - masked_signing_key_share: &G::ScalarField, - phase2: &MultOut, - ) -> (G, G::ScalarField) { - let R = base.mul(r).into_affine(); - let u = *masked_r * masked_signing_key_share + phase2.compute_u(0); - (R, u) - } -} - -pub fn compute_masked_arguments_to_multiply( - signing_key: &F, - r: F, - mut zero_shares: Vec, - self_id: ParticipantId, - others: &[ParticipantId], -) -> Result<(F, F), SSError> { - let beta = zero_shares.pop().unwrap(); - let alpha = zero_shares.pop().unwrap(); - let lambda = secret_sharing_and_dkg::common::lagrange_basis_at_0::(&others, self_id)?; - Ok((alpha + (lambda * signing_key), beta + r)) -} - #[cfg(test)] pub mod tests { - use super::*; use ark_bls12_381::{Bls12_381, Fr, G1Affine}; - use ark_ff::Zero; + use ark_ff::{PrimeField, Zero}; + use ark_std::{collections::BTreeSet, rand::RngCore, vec::Vec}; + use oblivious_transfer_protocols::ParticipantId; use std::time::Instant; use crate::{ @@ -284,20 +26,23 @@ pub mod tests { setup::{PublicKey, SecretKey}, }; use ark_std::{ - cfg_iter, rand::{rngs::StdRng, SeedableRng}, UniformRand, }; use blake2::Blake2b512; use oblivious_transfer_protocols::ot_based_multiplication::{ - batch_mul_multi_party::Participant as MultParty, dkls18_mul_2p::MultiplicationOTEParams, + base_ot_multi_party_pairwise::BaseOTOutput, dkls18_mul_2p::MultiplicationOTEParams, dkls19_batch_mul_2p::GadgetVector, }; use 
secret_sharing_and_dkg::shamir_ss::{deal_random_secret, deal_secret}; + use short_group_sig::threshold_weak_bb_sig::{Phase2, SigShare}; use test_utils::ot::do_pairwise_base_ot; - #[cfg(feature = "parallel")] - use rayon::prelude::*; + const BASE_OT_KEY_SIZE: u16 = 128; + const KAPPA: u16 = 256; + const STATISTICAL_SECURITY_PARAMETER: u16 = 80; + const OTE_PARAMS: MultiplicationOTEParams = + MultiplicationOTEParams:: {}; pub fn trusted_party_keygen( rng: &mut R, @@ -308,58 +53,15 @@ pub mod tests { (secret, shares.0.into_iter().map(|s| s.share).collect()) } - #[test] - fn accumulator_on_deletion() { - let mut rng = StdRng::seed_from_u64(0u64); - const BASE_OT_KEY_SIZE: u16 = 128; - const KAPPA: u16 = 256; - const STATISTICAL_SECURITY_PARAMETER: u16 = 80; - let ote_params = MultiplicationOTEParams:: {}; - let gadget_vector = GadgetVector::::new::< - Blake2b512, - >(ote_params, b"test-gadget-vector"); - - let protocol_id = b"test".to_vec(); - - let threshold_signers = 5; - let total_signers = 8; - let all_party_set = (1..=total_signers).into_iter().collect::>(); + fn do_phase1( + rng: &mut StdRng, + threshold_signers: ParticipantId, + protocol_id: Vec, + ) -> Vec> { let threshold_party_set = (1..=threshold_signers).into_iter().collect::>(); - // The signers do a keygen. This is a one time setup. - let (sk, sk_shares) = - trusted_party_keygen::<_, Fr>(&mut rng, threshold_signers, total_signers); - - let params = SetupParams::::generate_using_rng(&mut rng); - let mut accumulator = PositiveAccumulator::::initialize(¶ms); - let mut state = InMemoryState::new(); - let secret_key = SecretKey(sk); - let secret_key_shares = cfg_iter!(sk_shares) - .map(|s| SecretKey(*s)) - .collect::>(); - - // The signers run OT protocol instances. This is also a one time setup. 
- let base_ot_outputs = do_pairwise_base_ot::( - &mut rng, - ote_params.num_base_ot(), - total_signers, - all_party_set.clone(), - ); - - let count = 10; - let mut elems = vec![]; - for _ in 0..count { - let elem = Fr::rand(&mut rng); - accumulator = accumulator.add(elem, &secret_key, &mut state).unwrap(); - elems.push(elem); - } - - let remove_element = &elems[5]; - let expected_new = accumulator.compute_new_post_remove(remove_element, &secret_key); - - let mut round1s = vec![]; + let mut phase1s = vec![]; let mut commitments_zero_share = vec![]; - let mut round1outs = vec![]; // Signers initiate round-1 and each signer sends commitments to others let start = Instant::now(); @@ -367,8 +69,14 @@ pub mod tests { let mut others = threshold_party_set.clone(); others.remove(&i); let (round1, comm_zero) = - Phase1::::init(&mut rng, i, others, protocol_id.clone()).unwrap(); - round1s.push(round1); + short_group_sig::threshold_weak_bb_sig::Phase1::::init( + rng, + i, + others, + protocol_id.clone(), + ) + .unwrap(); + phase1s.push(round1); commitments_zero_share.push(comm_zero); } @@ -376,7 +84,7 @@ pub mod tests { for i in 1..=threshold_signers { for j in 1..=threshold_signers { if i != j { - round1s[i as usize - 1] + phase1s[i as usize - 1] .receive_commitment( j, commitments_zero_share[j as usize - 1] @@ -393,9 +101,9 @@ pub mod tests { for i in 1..=threshold_signers { for j in 1..=threshold_signers { if i != j { - let zero_share = round1s[j as usize - 1] + let zero_share = phase1s[j as usize - 1] .get_comm_shares_and_salts_for_zero_sharing_protocol_with_other(&i); - round1s[i as usize - 1] + phase1s[i as usize - 1] .receive_shares(j, zero_share) .unwrap(); } @@ -403,49 +111,82 @@ pub mod tests { } // Signers finish round-1 to generate the output - let mut expected_sk = Fr::zero(); - for (i, round1) in round1s.into_iter().enumerate() { - let out = round1 - .finish::(&secret_key_shares[i].0) - .unwrap(); - expected_sk += out.masked_signing_key_shares; - round1outs.push(out); - } + let phase1_outputs = phase1s + .into_iter() + .map(|p| p.finish::().unwrap()) + .collect::>(); println!("Phase 1 took {:?}", start.elapsed()); + phase1_outputs + } - assert_eq!(expected_sk, sk); - - let mut round2s = vec![]; + fn do_phase2( + rng: &mut StdRng, + threshold_signers: ParticipantId, + gadget_vector: &GadgetVector, + pk_gen: &G1Affine, + base_ot_outputs: &[BaseOTOutput], + phase1_outs: &[short_group_sig::threshold_weak_bb_sig::Phase1Output], + expected_sk_term: Fr, + secret_key_shares: &[Fr], + full_element: Option, + element_shares: Option>, + ) -> Vec> { + let mut phase2s = vec![]; let mut all_msg_1s = vec![]; let label = b"test"; + // Only one of them should be set + assert!(full_element.is_some() ^ element_shares.is_some()); + let known_element = full_element.is_some(); + let full_element = full_element.unwrap_or_default(); + let element_shares = element_shares.unwrap_or_default(); + // Signers initiate round-2 and each signer sends messages to others let start = Instant::now(); for i in 1..=threshold_signers { - let mut others = threshold_party_set.clone(); - others.remove(&i); - let (phase, U) = MultParty::init( - &mut rng, - i, - vec![round1outs[i as usize - 1].masked_signing_key_shares], - vec![round1outs[i as usize - 1].masked_rs], - base_ot_outputs[i as usize - 1].clone(), - others, - ote_params, - &gadget_vector, - label, - ) - .unwrap(); - round2s.push(phase); - all_msg_1s.push((i, U)); + let (phase, msgs) = if known_element { + Phase2::init_for_known_message( + rng, + i, + secret_key_shares[i 
as usize - 1], + full_element, + phase1_outs[i as usize - 1].clone(), + base_ot_outputs[i as usize - 1].clone(), + OTE_PARAMS, + &gadget_vector, + label, + ) + .unwrap() + } else { + Phase2::init_for_shared_message( + rng, + i, + secret_key_shares[i as usize - 1], + element_shares[i as usize - 1], + phase1_outs[i as usize - 1].clone(), + base_ot_outputs[i as usize - 1].clone(), + OTE_PARAMS, + &gadget_vector, + label, + ) + .unwrap() + }; + phase2s.push(phase); + all_msg_1s.push((i, msgs)); + } + + let mut sk_term = Fr::zero(); + for p in &phase2s { + sk_term += p.masked_sk_term_share } + assert_eq!(expected_sk_term, sk_term); // Signers process round-2 messages received from others let mut all_msg_2s = vec![]; for (sender_id, msg_1s) in all_msg_1s { for (receiver_id, m) in msg_1s { - let m2 = round2s[receiver_id as usize - 1] + let m2 = phase2s[receiver_id as usize - 1] .receive_message1::(sender_id, m, &gadget_vector) .unwrap(); all_msg_2s.push((receiver_id, sender_id, m2)); @@ -453,52 +194,77 @@ pub mod tests { } for (sender_id, receiver_id, m2) in all_msg_2s { - round2s[receiver_id as usize - 1] + phase2s[receiver_id as usize - 1] .receive_message2::(sender_id, m2, &gadget_vector) .unwrap(); } - let round2_outputs = round2s.into_iter().map(|p| p.finish()).collect::>(); + let shares = phase2s + .into_iter() + .map(|p| p.finish(pk_gen)) + .collect::>(); println!("Phase 2 took {:?}", start.elapsed()); + shares + } - // Check that multiplication phase ran successfully, i.e. each signer has an additive share of - // a multiplication with every other signer - for i in 1..=threshold_signers { - for (j, z_A) in &round2_outputs[i as usize - 1].z_A { - let z_B = round2_outputs[*j as usize - 1].z_B.get(&i).unwrap(); - assert_eq!( - z_A.0[0] + z_B.0[0], - round1outs[i as usize - 1].masked_signing_key_shares - * round1outs[*j as usize - 1].masked_rs - ); - assert_eq!( - z_A.1[0] + z_B.1[0], - round1outs[i as usize - 1].masked_rs - * round1outs[*j as usize - 1].masked_signing_key_shares - ); - } - } + #[test] + fn accumulator_on_deletion() { + let mut rng = StdRng::seed_from_u64(0u64); + let gadget_vector = GadgetVector::::new::< + Blake2b512, + >(OTE_PARAMS, b"test-gadget-vector"); - let mut shares = vec![]; - let start = Instant::now(); - for i in 0..threshold_signers as usize { - let share = ShareOfKnownMember::new( - remove_element, - accumulator.value(), - &round1outs[i], - &round2_outputs[i], - ) - .unwrap(); - shares.push(share); + let threshold_signers = 5; + let total_signers = 8; + let all_party_set = (1..=total_signers).into_iter().collect::>(); + + // The signers do a keygen. This is a one time setup. + let (sk, sk_shares) = + trusted_party_keygen::<_, Fr>(&mut rng, threshold_signers, total_signers); + + let params = SetupParams::::generate_using_rng(&mut rng); + let mut accumulator = PositiveAccumulator::::initialize(¶ms); + let mut state = InMemoryState::new(); + let secret_key = SecretKey(sk); + + // The signers run OT protocol instances. This is also a one time setup. 
+ let base_ot_outputs = do_pairwise_base_ot::( + &mut rng, + OTE_PARAMS.num_base_ot(), + total_signers, + all_party_set.clone(), + ); + + let count = 10; + let mut elems = vec![]; + for _ in 0..count { + let elem = Fr::rand(&mut rng); + accumulator = accumulator.add(elem, &secret_key, &mut state).unwrap(); + elems.push(elem); } - println!( - "Creating {} new shares took {:?}", + + let remove_element = &elems[5]; + let expected_new = accumulator.compute_new_post_remove(remove_element, &secret_key); + + let protocol_id = b"test".to_vec(); + + let phase1_outs = do_phase1(&mut rng, threshold_signers, protocol_id.clone()); + + let shares = do_phase2( + &mut rng, threshold_signers, - start.elapsed() + &gadget_vector, + &accumulator.value(), + &base_ot_outputs, + &phase1_outs, + sk, + &sk_shares, + Some(remove_element.clone()), + None, ); let start = Instant::now(); - let updated_accum = ShareOfKnownMember::aggregate(shares); + let updated_accum = SigShare::aggregate(shares); println!( "Aggregating {} shares took {:?}", threshold_signers, @@ -515,20 +281,15 @@ pub mod tests { #[test] fn witness_generation() { let mut rng = StdRng::seed_from_u64(0u64); - const BASE_OT_KEY_SIZE: u16 = 128; - const KAPPA: u16 = 256; - const STATISTICAL_SECURITY_PARAMETER: u16 = 80; - let ote_params = MultiplicationOTEParams:: {}; let gadget_vector = GadgetVector::::new::< Blake2b512, - >(ote_params, b"test-gadget-vector"); + >(OTE_PARAMS, b"test-gadget-vector"); let protocol_id = b"test".to_vec(); let threshold_signers = 5; let total_signers = 8; let all_party_set = (1..=total_signers).into_iter().collect::>(); - let threshold_party_set = (1..=threshold_signers).into_iter().collect::>(); // The signers do a keygen. This is a one time setup. let (sk, sk_shares) = @@ -538,15 +299,12 @@ pub mod tests { let mut accumulator = PositiveAccumulator::::initialize(¶ms); let mut state = InMemoryState::new(); let secret_key = SecretKey(sk); - let secret_key_shares = cfg_iter!(sk_shares) - .map(|s| SecretKey(*s)) - .collect::>(); let public_key = PublicKey::new_from_secret_key(&secret_key, ¶ms); // The signers run OT protocol instances. This is also a one time setup. 
let base_ot_outputs = do_pairwise_base_ot::( &mut rng, - ote_params.num_base_ot(), + OTE_PARAMS.num_base_ot(), total_signers, all_party_set.clone(), ); @@ -561,151 +319,36 @@ pub mod tests { let member = &elems[1]; let expected_wit = accumulator - .get_membership_witness(&member, &secret_key, &mut state) + .get_membership_witness(member, &secret_key, &mut state) .unwrap(); assert!(accumulator.verify_membership(member, &expected_wit, &public_key, ¶ms)); + let phase1_outs = do_phase1(&mut rng, threshold_signers, protocol_id.clone()); + let (member_shares, _) = deal_secret::(&mut rng, *member, threshold_signers, total_signers).unwrap(); - let mut round1s = vec![]; - let mut commitments_zero_share = vec![]; - let mut round1outs = vec![]; - - // Signers initiate round-1 and each signer sends commitments to others - let start = Instant::now(); - for i in 1..=threshold_signers { - let mut others = threshold_party_set.clone(); - others.remove(&i); - let (round1, comm_zero) = - Phase1::::init(&mut rng, i, others, protocol_id.clone()).unwrap(); - round1s.push(round1); - commitments_zero_share.push(comm_zero); - } - - // Signers process round-1 commitments received from others - for i in 1..=threshold_signers { - for j in 1..=threshold_signers { - if i != j { - round1s[i as usize - 1] - .receive_commitment( - j, - commitments_zero_share[j as usize - 1] - .get(&i) - .unwrap() - .clone(), - ) - .unwrap(); - } - } - } - - // Signers create round-1 shares once they have the required commitments from others - for i in 1..=threshold_signers { - for j in 1..=threshold_signers { - if i != j { - let zero_share = round1s[j as usize - 1] - .get_comm_shares_and_salts_for_zero_sharing_protocol_with_other(&i); - round1s[i as usize - 1] - .receive_shares(j, zero_share) - .unwrap(); - } - } - } - - // Signers finish round-1 to generate the output - let mut expected_sum = Fr::zero(); - for (i, round1) in round1s.into_iter().enumerate() { - let out = round1 - .finish::(&(secret_key_shares[i].0 + member_shares.0[i].share)) - .unwrap(); - expected_sum += out.masked_signing_key_shares; - round1outs.push(out); - } - println!("Phase 1 took {:?}", start.elapsed()); - - assert_eq!(expected_sum, sk + member); - - let label = b"test"; - - let mut round2s = vec![]; - let mut all_msg_1s = vec![]; - - // Signers initiate round-2 and each signer sends messages to others - let start = Instant::now(); - for i in 1..=threshold_signers { - let mut others = threshold_party_set.clone(); - others.remove(&i); - let (phase, U) = MultParty::init( - &mut rng, - i, - vec![round1outs[i as usize - 1].masked_signing_key_shares], - vec![round1outs[i as usize - 1].masked_rs], - base_ot_outputs[i as usize - 1].clone(), - others, - ote_params, - &gadget_vector, - label, - ) - .unwrap(); - round2s.push(phase); - all_msg_1s.push((i, U)); - } - - // Signers process round-2 messages received from others - let mut all_msg_2s = vec![]; - for (sender_id, msg_1s) in all_msg_1s { - for (receiver_id, m) in msg_1s { - let m2 = round2s[receiver_id as usize - 1] - .receive_message1::(sender_id, m, &gadget_vector) - .unwrap(); - all_msg_2s.push((receiver_id, sender_id, m2)); - } - } - - for (sender_id, receiver_id, m2) in all_msg_2s { - round2s[receiver_id as usize - 1] - .receive_message2::(sender_id, m2, &gadget_vector) - .unwrap(); - } - - let round2_outputs = round2s.into_iter().map(|p| p.finish()).collect::>(); - println!("Phase 2 took {:?}", start.elapsed()); - - // Check that multiplication phase ran successfully, i.e. 
each signer has an additive share of - // a multiplication with every other signer - for i in 1..=threshold_signers { - for (j, z_A) in &round2_outputs[i as usize - 1].z_A { - let z_B = round2_outputs[*j as usize - 1].z_B.get(&i).unwrap(); - assert_eq!( - z_A.0[0] + z_B.0[0], - round1outs[i as usize - 1].masked_signing_key_shares - * round1outs[*j as usize - 1].masked_rs - ); - assert_eq!( - z_A.1[0] + z_B.1[0], - round1outs[i as usize - 1].masked_rs - * round1outs[*j as usize - 1].masked_signing_key_shares - ); - } - } - - let mut shares = vec![]; - let start = Instant::now(); - for i in 0..threshold_signers as usize { - let share = - ShareOfSharedMember::new(accumulator.value(), &round1outs[i], &round2_outputs[i]) - .unwrap(); - shares.push(share); - } - println!( - "Creating {} new shares took {:?}", + let shares = do_phase2( + &mut rng, threshold_signers, - start.elapsed() + &gadget_vector, + &accumulator.value(), + &base_ot_outputs, + &phase1_outs, + sk + member, + &sk_shares, + None, + Some( + member_shares + .0 + .into_iter() + .map(|share| share.share) + .collect(), + ), ); let start = Instant::now(); - let witness = ShareOfSharedMember::aggregate(shares); + let witness = SigShare::aggregate(shares); println!( "Aggregating {} shares took {:?}", threshold_signers,