//! Field helper functions (`p3_field/helpers.rs`).
use alloc::vec::Vec;
use core::iter::Sum;
use core::mem::MaybeUninit;
use core::ops::Mul;

use p3_maybe_rayon::prelude::*;

use crate::field::Field;
use crate::{PackedValue, PrimeCharacteristicRing, PrimeField, PrimeField32};

11/// Computes a multiplicative subgroup whose order is known in advance.
12pub fn cyclic_subgroup_known_order<F: Field>(
13    generator: F,
14    order: usize,
15) -> impl Iterator<Item = F> + Clone {
16    generator.powers().take(order)
17}
18
19/// Computes a coset of a multiplicative subgroup whose order is known in advance.
20pub fn cyclic_subgroup_coset_known_order<F: Field>(
21    generator: F,
22    shift: F,
23    order: usize,
24) -> impl Iterator<Item = F> + Clone {
25    generator.shifted_powers(shift).take(order)
26}
27
28/// Scales each element of the slice by `s` using packing.
29///
30/// # Performance
31/// For large slices, use [`par_scale_slice_in_place`].
32pub fn scale_slice_in_place_single_core<F: Field>(slice: &mut [F], s: F) {
33    let (packed, sfx) = F::Packing::pack_slice_with_suffix_mut(slice);
34    let packed_s: F::Packing = s.into();
35    packed.iter_mut().for_each(|x| *x *= packed_s);
36    sfx.iter_mut().for_each(|x| *x *= s);
37}
38
39/// Scales each element of the slice by `s` using packing and parallelization.
40///
41/// # Performance
42/// For small slices, use [`scale_slice_in_place_single_core`].
43/// Requires the `parallel` feature.
44#[inline]
45pub fn par_scale_slice_in_place<F: Field>(slice: &mut [F], s: F) {
46    let (packed, sfx) = F::Packing::pack_slice_with_suffix_mut(slice);
47    let packed_s: F::Packing = s.into();
48    packed.par_iter_mut().for_each(|x| *x *= packed_s);
49    sfx.iter_mut().for_each(|x| *x *= s);
50}
51
52/// Adds `other`, scaled by `s`, to the mutable `slice` using packing, or `slice += other * s`.
53///
54/// # Performance
55/// For large slices, use [`par_add_scaled_slice_in_place`].
56pub fn add_scaled_slice_in_place<F: Field>(slice: &mut [F], other: &[F], s: F) {
57    debug_assert_eq!(slice.len(), other.len(), "slices must have equal length");
58    let (slice_packed, slice_sfx) = F::Packing::pack_slice_with_suffix_mut(slice);
59    let (other_packed, other_sfx) = F::Packing::pack_slice_with_suffix(other);
60    let packed_s: F::Packing = s.into();
61    slice_packed
62        .iter_mut()
63        .zip(other_packed)
64        .for_each(|(x, y)| *x += *y * packed_s);
65    slice_sfx
66        .iter_mut()
67        .zip(other_sfx)
68        .for_each(|(x, y)| *x += *y * s);
69}
70
71/// Adds `other`, scaled by `s`, to the mutable `slice` using packing, or `slice += other * s`.
72///
73/// # Performance
74/// For small slices, use [`add_scaled_slice_in_place`].
75/// Requires the `parallel` feature.
76pub fn par_add_scaled_slice_in_place<F: Field>(slice: &mut [F], other: &[F], s: F) {
77    debug_assert_eq!(slice.len(), other.len(), "slices must have equal length");
78    let (slice_packed, slice_sfx) = F::Packing::pack_slice_with_suffix_mut(slice);
79    let (other_packed, other_sfx) = F::Packing::pack_slice_with_suffix(other);
80    let packed_s: F::Packing = s.into();
81    slice_packed
82        .par_iter_mut()
83        .zip(other_packed.par_iter())
84        .for_each(|(x, y)| *x += *y * packed_s);
85    slice_sfx
86        .iter_mut()
87        .zip(other_sfx)
88        .for_each(|(x, y)| *x += *y * s);
89}
90
91/// Extend a ring `R` element `x` to an array of length `D`
92/// by filling zeros.
93#[inline]
94#[must_use]
95pub const fn field_to_array<R: PrimeCharacteristicRing, const D: usize>(x: R) -> [R; D] {
96    let mut arr = [const { MaybeUninit::uninit() }; D];
97    arr[0] = MaybeUninit::new(x);
98    let mut i = 1;
99    while i < D {
100        arr[i] = MaybeUninit::new(R::ZERO);
101        i += 1;
102    }
103    unsafe { core::mem::transmute_copy::<_, [R; D]>(&arr) }
104}
105
/// Given an element x from a 32 bit field F_P compute x/2.
///
/// Even inputs halve directly. For odd `x`, the result is `(x + P)/2`
/// (which is even since `P` is odd), computed as `(x >> 1) + (P + 1)/2`
/// so the intermediate sum never overflows `u32`.
#[inline]
#[must_use]
pub const fn halve_u32<const P: u32>(x: u32) -> u32 {
    if x & 1 == 0 {
        x >> 1
    } else {
        (x >> 1) + ((P + 1) >> 1)
    }
}
114
/// Given an element x from a 64 bit field F_P compute x/2.
///
/// Even inputs halve directly. For odd `x`, the result is `(x + P)/2`
/// (which is even since `P` is odd), computed as `(x >> 1) + (P + 1)/2`
/// so the intermediate sum never overflows `u64`.
#[inline]
#[must_use]
pub const fn halve_u64<const P: u64>(x: u64) -> u64 {
    if x & 1 == 0 {
        x >> 1
    } else {
        (x >> 1) + ((P + 1) >> 1)
    }
}
123
124/// Reduce a slice of 32-bit field elements into a single element of a larger field.
125///
126/// Uses base-$2^{32}$ decomposition:
127///
128/// ```math
129/// \begin{equation}
130///     \text{reduce\_32}(vals) = \sum_{i=0}^{n-1} a_i \cdot 2^{32i}
131/// \end{equation}
132/// ```
133#[must_use]
134pub fn reduce_32<SF: PrimeField32, TF: PrimeField>(vals: &[SF]) -> TF {
135    // If the characteristic of TF is > 2^64, from_int and from_canonical_unchecked act identically
136    let base = TF::from_int(1u64 << 32);
137    vals.iter().rev().fold(TF::ZERO, |acc, val| {
138        acc * base + TF::from_int(val.as_canonical_u32())
139    })
140}
141
142/// Split a large field element into `n` base-$2^{64}$ chunks and map each into a 32-bit field.
143///
144/// Converts:
145/// ```math
146/// \begin{equation}
147///     x = \sum_{i=0}^{n-1} d_i \cdot 2^{64i}
148/// \end{equation}
149/// ```
150///
151/// Pads with zeros if needed.
152#[must_use]
153pub fn split_32<SF: PrimeField, TF: PrimeField32>(val: SF, n: usize) -> Vec<TF> {
154    let mut result: Vec<TF> = val
155        .as_canonical_biguint()
156        .to_u64_digits()
157        .iter()
158        .take(n)
159        .map(|d| TF::from_u64(*d))
160        .collect();
161
162    // Pad with zeros if needed
163    result.resize_with(n, || TF::ZERO);
164    result
165}
166
/// Maximally generic dot product.
///
/// Multiplies the iterators pairwise (stopping at the shorter one) and sums
/// the products into `S`; an empty zip yields the `Sum` identity.
#[must_use]
pub fn dot_product<S, LI, RI>(li: LI, ri: RI) -> S
where
    LI: Iterator,
    RI: Iterator,
    LI::Item: Mul<RI::Item>,
    S: Sum<<LI::Item as Mul<RI::Item>>::Output>,
{
    let products = li.zip(ri).map(|(l, r)| l * r);
    S::sum(products)
}