
p3_challenger/grinding_challenger.rs

use p3_field::{Field, PackedValue, PrimeField, PrimeField32, PrimeField64};
use p3_maybe_rayon::prelude::*;
use p3_symmetric::CryptographicPermutation;
use tracing::instrument;

use crate::{
    CanObserve, CanSampleBits, CanSampleUniformBits, DuplexChallenger, MultiField32Challenger,
    UniformSamplingField,
};

/// Trait for challengers that support proof-of-work (PoW) grinding.
///
/// A `GrindingChallenger` can:
/// - Absorb a candidate witness into the transcript
/// - Sample random bitstrings to check the PoW condition
/// - Brute-force search for a valid witness that satisfies the PoW
///
/// This trait is typically used in protocols requiring computational effort
/// from the prover.
pub trait GrindingChallenger:
    CanObserve<Self::Witness> + CanSampleBits<usize> + Sync + Clone
{
    /// The underlying field element type used as the witness.
    type Witness: Field;

    /// Perform a brute-force search to find a valid PoW witness.
    ///
    /// Given a `bits` parameter, this function searches for a field element
    /// `witness` such that, after observing it, the next `bits` bits the challenger outputs
    /// are all `0`.
    fn grind(&mut self, bits: usize) -> Self::Witness;

    /// Check whether a given `witness` satisfies the PoW condition.
    ///
    /// After absorbing the witness, the challenger samples `bits` random bits
    /// and verifies that all bits sampled are zero.
    ///
    /// Returns `true` if the witness passes the PoW check, `false` otherwise.
    #[must_use]
    fn check_witness(&mut self, bits: usize, witness: Self::Witness) -> bool {
        if bits == 0 {
            return true;
        }
        self.observe(witness);
        self.sample_bits(bits) == 0
    }
}
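
// Illustrative sketch (added for exposition, not part of the upstream API): the intended
// prover/verifier round trip for any `GrindingChallenger`. Both sides must have observed
// the same transcript before this point; `pow_round_trip` is a hypothetical helper name.
#[allow(dead_code)]
fn pow_round_trip<C: GrindingChallenger>(
    prover_challenger: &mut C,
    verifier_challenger: &mut C,
    pow_bits: usize,
) -> bool {
    // Prover: brute-force a witness whose next `pow_bits` sampled bits are all zero.
    let witness = prover_challenger.grind(pow_bits);
    // Verifier: absorb the claimed witness and re-sample the same bits.
    verifier_challenger.check_witness(pow_bits, witness)
}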

/// Trait for challengers that support proof-of-work (PoW) grinding with
/// guaranteed uniformly sampled bits.
pub trait UniformGrindingChallenger:
    GrindingChallenger + CanSampleUniformBits<Self::Witness>
{
    /// Grinds based on *uniformly sampled bits*. This variant is allowed to do rejection
    /// sampling if a value is sampled that would violate our uniformity requirement
    /// (chance of about 1/P).
    ///
    /// Use this together with `check_witness_uniform`.
    fn grind_uniform(&mut self, bits: usize) -> Self::Witness;

    /// Grinds based on *uniformly sampled bits*. This variant errors if a value is
    /// sampled that would violate our uniformity requirement (chance of about 1/P).
    /// See the `UniformSamplingField` trait implemented for each field for details.
    ///
    /// Use this together with `check_witness_uniform_may_error`.
    fn grind_uniform_may_error(&mut self, bits: usize) -> Self::Witness;

    /// Check whether a given `witness` satisfies the PoW condition.
    ///
    /// After absorbing the witness, the challenger samples `bits` random bits
    /// *uniformly* and verifies that all sampled bits are zero. The uniform
    /// sampling implies we do rejection sampling in roughly 1/P of cases.
    ///
    /// Returns `true` if the witness passes the PoW check, `false` otherwise.
    fn check_witness_uniform(&mut self, bits: usize, witness: Self::Witness) -> bool {
        self.observe(witness);
        self.sample_uniform_bits::<true>(bits)
            .expect("Error impossible here due to resampling strategy")
            == 0
    }

    /// Check whether a given `witness` satisfies the PoW condition.
    ///
    /// After absorbing the witness, the challenger samples `bits` random bits
    /// *uniformly* and verifies that all sampled bits are zero. In roughly 1/P
    /// of cases this function may error, when a sampled value lies outside a range
    /// in which we can guarantee uniform bits.
    ///
    /// Returns `true` if the witness passes the PoW check, `false` otherwise.
    fn check_witness_uniform_may_error(&mut self, bits: usize, witness: Self::Witness) -> bool {
        self.observe(witness);
        self.sample_uniform_bits::<false>(bits)
            .is_ok_and(|v| v == 0)
    }
}
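
// Illustrative sketch (added for exposition, not part of the upstream API): the uniform
// variants pair up the same way. `grind_uniform` goes with `check_witness_uniform`, and
// `grind_uniform_may_error` goes with `check_witness_uniform_may_error`;
// `uniform_pow_round_trip` is a hypothetical helper name.
#[allow(dead_code)]
fn uniform_pow_round_trip<C: UniformGrindingChallenger>(
    prover_challenger: &mut C,
    verifier_challenger: &mut C,
    pow_bits: usize,
) -> bool {
    // Prover: grind with rejection sampling, so the sampled bits are uniform.
    let witness = prover_challenger.grind_uniform(pow_bits);
    // Verifier: replay the same uniform-bit check on its own transcript copy.
    verifier_challenger.check_witness_uniform(pow_bits, witness)
}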

impl<F, P, const WIDTH: usize, const RATE: usize> GrindingChallenger
    for DuplexChallenger<F, P, WIDTH, RATE>
where
    F: PrimeField64,
    P: CryptographicPermutation<[F; WIDTH]>
        + CryptographicPermutation<[<F as Field>::Packing; WIDTH]>,
{
    type Witness = F;

    #[instrument(name = "grind for proof-of-work witness", skip_all)]
    fn grind(&mut self, bits: usize) -> Self::Witness {
        // Ensure `bits` is small enough to be used in a shift.
        assert!(bits < (usize::BITS as usize), "bit count must be valid");

        // Ensure the PoW target 2^bits is smaller than the field order.
        // Otherwise, the probability analysis for grinding would break.
        assert!((1 << bits) < F::ORDER_U64);

        // Trivial case: 0 bits mean no PoW is required and any witness is valid.
        if bits == 0 {
            return F::ZERO;
        }

        // SIMD width: number of field elements processed in parallel.
        // Each SIMD lane corresponds to one candidate witness.
        let lanes = F::Packing::WIDTH;

        // Total number of batches needed to cover all field elements.
        // Each batch tests `lanes` witnesses in parallel.
        let num_batches = (F::ORDER_U64 as usize).div_ceil(lanes);

        // Cache the field order.
        let order = F::ORDER_U64;

        // Bitmask used to check the PoW condition, e.g. bits = 3 => mask = 0b111.
        // We accept a witness if (sample & mask) == 0, i.e. the sample has `bits` trailing zero bits.
        let mask = (1usize << bits) - 1;

        // In a duplex sponge, new inputs are absorbed sequentially at indices [0, 1, 2, ...].
        // The grinding witness is therefore absorbed at the next available position.
        let witness_idx = self.input_buffer.len();

        // Build the sponge state as packed field elements (SIMD vectors).
        //
        // The current transcript is split across:
        // - `input_buffer`: recently observed transcript elements that have not yet been permuted
        // - `sponge_state`: the internal sponge state after previous permutations
        //
        // Logically, the next permutation would act on:
        //   [input_buffer || sponge_state]
        //
        // This is invariant across batches, so we compute it once.
        let base_packed_state: [_; WIDTH] = core::array::from_fn(|i| {
            if i < self.input_buffer.len() {
                // Broadcast buffered transcript elements (input_buffer) to all SIMD lanes.
                F::Packing::from(self.input_buffer[i])
            } else {
                // Broadcast existing sponge state (sponge_state) to all SIMD lanes.
                F::Packing::from(self.sponge_state[i])
            }
        });

        // Grinding is implemented via parallel brute-force search over candidate witnesses.
        //
        // For efficiency, the search is vectorized using SIMD:
        // It is semantically equivalent to serially trying witnesses until the PoW condition is met.
        //
        // - Each SIMD lane corresponds to a distinct candidate witness
        // - All lanes share the same transcript prefix
        // - A single permutation evaluates multiple candidates in parallel
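        //
        // A uniformly random candidate passes with probability ~2^-bits, so the search is
        // expected to try about 2^bits candidates, i.e. roughly 2^bits / lanes permutations.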
        let witness = (0..num_batches)
            .into_par_iter()
            .find_map_any(|batch| {
                // Compute the starting candidate for this batch.
                //
                // Each batch processes `F::Packing::WIDTH` candidates:
                //   - Batch 0 -> candidates [0, 1, ..., F::Packing::WIDTH - 1]
                //   - Batch 1 -> candidates [F::Packing::WIDTH, ..., 2 * F::Packing::WIDTH - 1]
                //   - Batch k -> candidates [k * F::Packing::WIDTH, ..., (k+1) * F::Packing::WIDTH - 1]
                let base = (batch * lanes) as u64;

                // Start with a copy of the precomputed base state.
                let mut packed_state = base_packed_state;

                // Generate SIMD-packed candidate witnesses.
                // Each lane receives a distinct field element.
                //   [base + 0, base + 1, ..., base + F::Packing::WIDTH - 1]
                let packed_witnesses = F::Packing::from_fn(|lane| {
                    let candidate = base + lane as u64;
                    if candidate < order {
                        // SAFETY: candidate < field order, so this is a valid canonical field element.
                        unsafe { F::from_canonical_unchecked(candidate) }
                    } else {
                        // Lanes past the field order hold no new candidate, so reuse the
                        // largest valid field element (`order - 1`, i.e. `F::NEG_ONE`) as filler.
                        F::NEG_ONE
                    }
                });

                // Insert the candidate witnesses at the next absorption position.
                //
                // This simulates absorbing `transcript || witness` before the Fiat–Shamir challenge is derived.
                packed_state[witness_idx] = packed_witnesses;

                // Apply the cryptographic permutation (SIMD version)
                //
                // This permutes all `lanes` candidates simultaneously.
                self.permutation.permute_mut(&mut packed_state);

                // Check each lane for the PoW condition.
                //
                // - In a duplex sponge, output is read from position [RATE-1] (last rate element).
                // - We check whether the low `bits` bits of each sample are all zero.
                //
                // We scan SIMD lanes to find the first candidate whose output satisfies the PoW condition.
                packed_state[RATE - 1]
                    .as_slice()
                    .iter()
                    .zip(packed_witnesses.as_slice())
                    .find(|(sample, _)| {
                        // Accept if the low `bits` bits are all zero.
                        (sample.as_canonical_u64() as usize & mask) == 0
                    })
                    .map(|(_, &witness)| witness)
            })
            .expect("failed to find proof-of-work witness");

        // Double-check the witness using the standard verifier logic and update the challenger state.
        assert!(self.check_witness(bits, witness));

        witness
    }
}

impl<F, P, const WIDTH: usize, const RATE: usize> UniformGrindingChallenger
    for DuplexChallenger<F, P, WIDTH, RATE>
where
    F: UniformSamplingField + PrimeField64,
    P: CryptographicPermutation<[F; WIDTH]>
        + CryptographicPermutation<[<F as Field>::Packing; WIDTH]>,
{
    #[instrument(name = "grind uniform for proof-of-work witness", skip_all)]
    fn grind_uniform(&mut self, bits: usize) -> Self::Witness {
        // Call the generic grinder with the "resample" checking logic.
        self.grind_generic(bits, |challenger, witness| {
            challenger.check_witness_uniform(bits, witness)
        })
    }

    #[instrument(name = "grind uniform may error for proof-of-work witness", skip_all)]
    fn grind_uniform_may_error(&mut self, bits: usize) -> Self::Witness {
        // Call the generic grinder with the "error" checking logic.
        self.grind_generic(bits, |challenger, witness| {
            challenger.check_witness_uniform_may_error(bits, witness)
        })
    }
}

impl<F, P, const WIDTH: usize, const RATE: usize> DuplexChallenger<F, P, WIDTH, RATE>
where
    F: PrimeField64,
    P: CryptographicPermutation<[F; WIDTH]>,
{
    /// A generic, private helper for PoW grinding, parameterized by the checking function.
    fn grind_generic<CHECK>(&mut self, bits: usize, check_fn: CHECK) -> F
    where
        CHECK: Fn(&mut Self, F) -> bool + Sync + Send,
    {
        // TODO: consider asserting `bits > 0`; unlike `grind`, the zero-bit case is not
        // special-cased here.
        assert!(bits < (usize::BITS as usize), "bit count must be valid");
        assert!(
            (1u64 << bits) < F::ORDER_U64,
            "bit count exceeds field order"
        );
        // The core parallel brute-force search logic.
        let witness = (0..F::ORDER_U64)
            .into_par_iter()
            .map(|i| unsafe {
                // This is safe as i is always in range.
                F::from_canonical_unchecked(i)
            })
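            // Each candidate is checked against a fresh clone of the challenger, so the
            // shared transcript state is not mutated during the parallel search.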
            .find_any(|&witness| check_fn(&mut self.clone(), witness))
            .expect("failed to find proof-of-work witness");
        // Run the check one last time on the *original* challenger to update its state
        // and confirm the witness is valid.
        assert!(check_fn(self, witness));
        witness
    }
}

impl<F, PF, P, const WIDTH: usize, const RATE: usize> GrindingChallenger
    for MultiField32Challenger<F, PF, P, WIDTH, RATE>
where
    F: PrimeField32,
    PF: PrimeField,
    P: CryptographicPermutation<[PF; WIDTH]>,
{
    type Witness = F;

    #[instrument(name = "grind for proof-of-work witness", skip_all)]
    fn grind(&mut self, bits: usize) -> Self::Witness {
        assert!(bits < (usize::BITS as usize), "bit count must be valid");
        assert!((1 << bits) < F::ORDER_U32);
        let witness = (0..F::ORDER_U32)
            .into_par_iter()
            .map(|i| unsafe {
                // i < F::ORDER_U32 by construction so this is safe.
                F::from_canonical_unchecked(i)
            })
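            // As above, run each parallel check on a clone so the real challenger is only
            // updated by the final `check_witness` call below.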
            .find_any(|witness| self.clone().check_witness(bits, *witness))
            .expect("failed to find witness");
        assert!(self.check_witness(bits, witness));
        witness
    }
}