Skip to content
Snippets Groups Projects
Commit c81817a3 authored by E__Man's Association
Browse files

Frida: added merkle proof for zipped queries

parent 915e4d2d
No related branches found
No related tags found
No related merge requests found
......@@ -319,7 +319,6 @@ fn measure_frida(c: &mut Criterion) {
// Finding best `k` for fixed file size
parametric_degree_bound_fixed_size(c, 67_108_864, FOLDING_FACTOR);
}
criterion_group!(benches, measure_frida);
......
......@@ -39,6 +39,7 @@ pub struct FridaBuilder<F, H: Hasher> {
tree: MerkleTree<H>,
fri_proof: FriProof<F, H>,
zipped_queries: Vec<F>,
zipped_proof: MerkleProof<H>,
num_poly: usize,
}
......@@ -88,16 +89,22 @@ impl<F: FftField, H: Hasher> FridaBuilder<F, H> {
let mut rng = MemoryRng::from(rng);
let proof = build_proof(commitments, &mut rng, num_queries);
let positions = rng.last_positions();
let mut positions = std::mem::take(rng.last_positions_mut());
let zipped_queries = positions
.iter()
.flat_map(|&pos| nth_evaluations(evaluations, pos))
.collect();
// `tree.proof` requires a sorted slice of positions without duplicates
positions.sort_unstable();
positions.dedup();
let zipped_proof = tree.proof(&positions).into();
Self {
tree,
fri_proof: proof,
zipped_queries,
zipped_proof,
num_poly: evaluations.len(),
}
}
......@@ -128,6 +135,7 @@ impl<F: FftField, H: Hasher> FridaBuilder<F, H> {
pub struct FridaCommitment<F, H: Hasher> {
zipped_root: H::Hash,
zipped_queries: Vec<F>,
zipped_proof: MerkleProof<H>,
num_poly: usize,
fri_proof: FriProof<F, H>,
}
......@@ -157,19 +165,40 @@ impl<F: FftField, H: Hasher> FridaCommitment<F, H> {
self.fri_proof
.verify::<N, _>(&mut rng, num_queries, degree_bound, domain_size)?;
let positions = rng.last_positions();
let folded_postions = fold_positions(positions, domain_size / N);
let mut positions = std::mem::take(rng.last_positions_mut());
let folded_postions = fold_positions(&positions, domain_size / N);
let queried = self
.fri_proof
.first_layer()
.queried_evaluations::<N>(positions, &folded_postions, domain_size)
.queried_evaluations::<N>(&positions, &folded_postions, domain_size)
.unwrap();
if queried.len() * self.num_poly != self.zipped_queries.len() {
return Err(FridaError::InvalidZippedQueries);
}
for i in 0..queried.len() {
if queried[i]
let mut indices = (0..positions.len()).collect::<Vec<_>>();
indices.sort_unstable_by(|&i, &j| positions[i].cmp(&positions[j]));
indices.dedup_by(|&mut a, &mut b| positions[a] == positions[b]);
positions.sort_unstable();
positions.dedup();
let hashes = indices
.iter()
.map(|i| {
H::hash_item(&self.zipped_queries[(i * self.num_poly)..((i + 1) * self.num_poly)])
})
.collect::<Vec<_>>();
if !self
.zipped_proof
.verify(self.zipped_root, &positions, &hashes, domain_size)
{
return Err(FridaError::InvalidZippedQueries);
}
for (i, &query) in queried.iter().enumerate() {
if query
!= evaluate(
self.zipped_queries[(i * self.num_poly)..((i + 1) * self.num_poly)]
.iter()
......@@ -191,6 +220,7 @@ impl<F: FftField, H: Hasher> From<FridaBuilder<F, H>> for FridaCommitment<F, H>
Self {
zipped_root,
zipped_queries: value.zipped_queries,
zipped_proof: value.zipped_proof,
fri_proof: value.fri_proof,
num_poly: value.num_poly,
}
......
......@@ -179,6 +179,10 @@ impl<R> MemoryRng<R> {
pub fn last_positions(&self) -> &[usize] {
&self.last_positions
}
/// Same as [`MemoryRng::last_positions`] but returns a mutable reference.
pub fn last_positions_mut(&mut self) -> &mut Vec<usize> {
&mut self.last_positions
}
pub fn into_inner(self) -> R {
self.inner
}
......
......@@ -111,13 +111,16 @@ pub trait HasherExt: Hasher {
///
/// `buffer` is used to store the serialized bytes. Its content when the function returns is unspecified.
/// If it is not empty initially, it will be cleared first.
fn hash_item_with<S: CanonicalSerialize>(value: &S, buffer: &mut Vec<u8>) -> Self::Hash;
fn hash_item_with<S: CanonicalSerialize + ?Sized>(
value: &S,
buffer: &mut Vec<u8>,
) -> Self::Hash;
/// Uses the implementation of [`CanonicalSerialize`] to convert `value` into bytes then return the
/// hash value of those bytes.
///
/// This allocates a new temporary vector to store the serialized bytes.
fn hash_item<S: CanonicalSerialize>(value: &S) -> Self::Hash {
fn hash_item<S: CanonicalSerialize + ?Sized>(value: &S) -> Self::Hash {
Self::hash_item_with(value, &mut Vec::with_capacity(value.compressed_size()))
}
/// Convenience function to hash a slice of values.
......@@ -131,7 +134,10 @@ pub trait HasherExt: Hasher {
}
}
impl<H: Hasher> HasherExt for H {
fn hash_item_with<S: CanonicalSerialize>(value: &S, buffer: &mut Vec<u8>) -> Self::Hash {
fn hash_item_with<S: CanonicalSerialize + ?Sized>(
value: &S,
buffer: &mut Vec<u8>,
) -> Self::Hash {
buffer.clear();
value
.serialize_compressed(&mut *buffer)
......
//! Code shared between tests and benches
use ark_ff::{Fp128, MontBackend, MontConfig};
use rand::{distributions::{Distribution, Standard}, thread_rng, Rng};
use rand::{
distributions::{Distribution, Standard},
thread_rng, Rng,
};
pub const NUMBER_OF_POLYNOMIALS: usize = 10;
pub const POLY_COEFFS_LEN: usize = 4096;
......@@ -18,10 +21,18 @@ pub struct Test;
/// A prime, fft-friendly field isomorph to [`winter_math::fields::f128::BaseElement`].
pub type Fq = Fp128<MontBackend<Test, 2>>;
pub fn random_file<F: Clone>(k: usize, nb_polynomials: usize) -> Vec<Vec<F>> where Standard: Distribution<F> {
pub fn random_file<F: Clone>(k: usize, nb_polynomials: usize) -> Vec<Vec<F>>
where
Standard: Distribution<F>,
{
let nb_items = k * nb_polynomials;
let mut rng = thread_rng();
(0..nb_items).map(|_| rng.gen()).collect::<Vec<_>>().chunks_exact(k).map(<[F]>::to_vec).collect()
(0..nb_items)
.map(|_| rng.gen())
.collect::<Vec<_>>()
.chunks_exact(k)
.map(<[F]>::to_vec)
.collect()
}
#[macro_export]
......
0% — Loading, or fetching the diff failed. Retry.
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment