1use crate::{
2 AdditiveGroup, BigInt, BigInteger, FftField, Field, LegendreSymbol, One, PrimeField,
3 SqrtPrecomputation, Zero,
4};
5use ark_serialize::{
6 buffer_byte_size, CanonicalDeserialize, CanonicalDeserializeWithFlags, CanonicalSerialize,
7 CanonicalSerializeWithFlags, Compress, EmptyFlags, Flags, SerializationError, Valid, Validate,
8};
9use ark_std::{
10 cmp::*,
11 fmt::{Display, Formatter, Result as FmtResult},
12 marker::PhantomData,
13 ops::{Add, AddAssign, Div, DivAssign, Mul, MulAssign, Neg, Sub, SubAssign},
14 str::FromStr,
15 string::*,
16};
17use core::iter;
18
19#[macro_use]
20mod montgomery_backend;
21pub use montgomery_backend::*;
22
/// A trait that specifies the constants and arithmetic procedures of a prime
/// field of at most `N * 64` bits. Concrete backends (e.g. the Montgomery
/// backend in this module) implement this trait for specific moduli.
pub trait FpConfig<const N: usize>: Send + Sync + 'static + Sized {
    /// The modulus of the field.
    const MODULUS: crate::BigInt<N>;

    /// A multiplicative generator of the field.
    const GENERATOR: Fp<Self, N>;

    /// Additive identity of the field, i.e. the element `e` such that
    /// `e + f = f` for all elements `f`.
    const ZERO: Fp<Self, N>;

    /// Multiplicative identity of the field, i.e. the element `e` such that
    /// `e * f = f` for all elements `f`.
    const ONE: Fp<Self, N>;

    /// Two-adicity of the field: the largest `s` such that `2^s` divides the
    /// multiplicative group order `MODULUS - 1`.
    const TWO_ADICITY: u32;

    /// A `2^TWO_ADICITY`-th root of unity, used for FFTs over this field.
    const TWO_ADIC_ROOT_OF_UNITY: Fp<Self, N>;

    /// An integer `b` such that there exists a multiplicative subgroup of size
    /// `b^k` for some `k`, if one is available (used for mixed-radix FFTs).
    const SMALL_SUBGROUP_BASE: Option<u32> = None;

    /// The largest `k` such that a subgroup of size `SMALL_SUBGROUP_BASE^k`
    /// exists, if applicable.
    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = None;

    /// A root of unity generating the large mixed-radix subgroup, if
    /// `SMALL_SUBGROUP_BASE`/`SMALL_SUBGROUP_BASE_ADICITY` are set.
    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Fp<Self, N>> = None;

    /// Precomputed material for square-root computation, if available.
    const SQRT_PRECOMP: Option<SqrtPrecomputation<Fp<Self, N>>>;

    /// Set `a += b`.
    fn add_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);

    /// Set `a -= b`.
    fn sub_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);

    /// Set `a = a + a`.
    fn double_in_place(a: &mut Fp<Self, N>);

    /// Set `a = -a`.
    fn neg_in_place(a: &mut Fp<Self, N>);

    /// Set `a *= b`.
    fn mul_assign(a: &mut Fp<Self, N>, b: &Fp<Self, N>);

    /// Compute the inner product `<a, b>` of two equal-length slices.
    fn sum_of_products<const T: usize>(a: &[Fp<Self, N>; T], b: &[Fp<Self, N>; T]) -> Fp<Self, N>;

    /// Set `a = a * a`.
    fn square_in_place(a: &mut Fp<Self, N>);

    /// Compute `a^{-1}`, returning `None` if `a` is zero.
    fn inverse(a: &Fp<Self, N>) -> Option<Fp<Self, N>>;

    /// Construct a field element from an integer. Returns `None` if the
    /// integer is not in the canonical range `0..MODULUS`.
    fn from_bigint(other: BigInt<N>) -> Option<Fp<Self, N>>;

    /// Convert a field element to its canonical integer representation in the
    /// range `0..MODULUS`.
    fn into_bigint(other: Fp<Self, N>) -> BigInt<N>;
}
101
/// Represents an element of the prime field `F_p`, where `p == P::MODULUS`.
/// Can represent elements of any field whose modulus fits in `N * 64` bits.
#[derive(Educe)]
#[educe(Default, Hash, Clone, Copy, PartialEq, Eq)]
pub struct Fp<P: FpConfig<N>, const N: usize>(
    // Internal (backend-defined) representation of the element — for the
    // Montgomery backend this is the Montgomery form, NOT the canonical
    // integer. Use `into_bigint`/`from_bigint` to convert.
    #[doc(hidden)]
    pub BigInt<N>,
    // Ties the element to its configuration type without storing it.
    #[doc(hidden)] pub PhantomData<P>,
);
113
// Convenience aliases: `FpK<P>` is a prime field element backed by
// `K / 64` limbs (i.e. whose modulus fits in at most `K` bits).
pub type Fp64<P> = Fp<P, 1>;
pub type Fp128<P> = Fp<P, 2>;
pub type Fp192<P> = Fp<P, 3>;
pub type Fp256<P> = Fp<P, 4>;
pub type Fp320<P> = Fp<P, 5>;
pub type Fp384<P> = Fp<P, 6>;
pub type Fp448<P> = Fp<P, 7>;
pub type Fp512<P> = Fp<P, 8>;
pub type Fp576<P> = Fp<P, 9>;
pub type Fp640<P> = Fp<P, 10>;
pub type Fp704<P> = Fp<P, 11>;
pub type Fp768<P> = Fp<P, 12>;
pub type Fp832<P> = Fp<P, 13>;
127
impl<P: FpConfig<N>, const N: usize> Fp<P, N> {
    /// Returns `true` if the raw limb value is not in canonical (reduced) form.
    #[doc(hidden)]
    #[inline]
    pub fn is_geq_modulus(&self) -> bool {
        self.0 >= P::MODULUS
    }

    /// Conditionally subtracts the modulus once, mapping a value known to be
    /// in `0..2*MODULUS` back into the canonical range `0..MODULUS`.
    #[inline]
    fn subtract_modulus(&mut self) {
        if self.is_geq_modulus() {
            self.0.sub_with_borrow(&Self::MODULUS);
        }
    }

    /// Like `subtract_modulus`, but also subtracts when the preceding limb
    /// arithmetic overflowed (`carry == true`): the true value then exceeds
    /// `2^(64*N)` and is certainly `>= MODULUS` even though the stored limbs
    /// compare as smaller.
    #[inline]
    fn subtract_modulus_with_carry(&mut self, carry: bool) {
        if carry || self.is_geq_modulus() {
            self.0.sub_with_borrow(&Self::MODULUS);
        }
    }

    /// Number of unused high bits in the limb representation:
    /// `64 * N - MODULUS_BIT_SIZE`.
    fn num_bits_to_shave() -> usize {
        64 * N - (Self::MODULUS_BIT_SIZE as usize)
    }
}
153
154impl<P: FpConfig<N>, const N: usize> ark_std::fmt::Debug for Fp<P, N> {
155 fn fmt(&self, f: &mut Formatter<'_>) -> ark_std::fmt::Result {
156 ark_std::fmt::Debug::fmt(&self.into_bigint(), f)
157 }
158}
159
160impl<P: FpConfig<N>, const N: usize> Zero for Fp<P, N> {
161 #[inline]
162 fn zero() -> Self {
163 P::ZERO
164 }
165
166 #[inline]
167 fn is_zero(&self) -> bool {
168 *self == P::ZERO
169 }
170}
171
172impl<P: FpConfig<N>, const N: usize> One for Fp<P, N> {
173 #[inline]
174 fn one() -> Self {
175 P::ONE
176 }
177
178 #[inline]
179 fn is_one(&self) -> bool {
180 *self == P::ONE
181 }
182}
183
184impl<P: FpConfig<N>, const N: usize> AdditiveGroup for Fp<P, N> {
185 type Scalar = Self;
186 const ZERO: Self = P::ZERO;
187
188 #[inline]
189 fn double(&self) -> Self {
190 let mut temp = *self;
191 temp.double_in_place();
192 temp
193 }
194
195 #[inline]
196 fn double_in_place(&mut self) -> &mut Self {
197 P::double_in_place(self);
198 self
199 }
200
201 #[inline]
202 fn neg_in_place(&mut self) -> &mut Self {
203 P::neg_in_place(self);
204 self
205 }
206}
207
impl<P: FpConfig<N>, const N: usize> Field for Fp<P, N> {
    // A prime field is its own base prime field.
    type BasePrimeField = Self;

    const SQRT_PRECOMP: Option<SqrtPrecomputation<Self>> = P::SQRT_PRECOMP;
    const ONE: Self = P::ONE;

    /// A prime field is a degree-1 extension of itself.
    fn extension_degree() -> u64 {
        1
    }

    fn from_base_prime_field(elem: Self::BasePrimeField) -> Self {
        elem
    }

    /// Yields `self` as the single coefficient over the base prime field.
    fn to_base_prime_field_elements(&self) -> impl Iterator<Item = Self::BasePrimeField> {
        iter::once(*self)
    }

    /// Succeeds only for an iterator containing exactly one element.
    fn from_base_prime_field_elems(
        elems: impl IntoIterator<Item = Self::BasePrimeField>,
    ) -> Option<Self> {
        let mut elems = elems.into_iter();
        let elem = elems.next()?;
        if elems.next().is_some() {
            return None;
        }
        Some(elem)
    }

    /// The characteristic of a prime field is its modulus, exposed as limbs.
    #[inline]
    fn characteristic() -> &'static [u64] {
        P::MODULUS.as_ref()
    }

    /// Inner product `<a, b>`, delegated to the backend for an optimized path.
    #[inline]
    fn sum_of_products<const T: usize>(a: &[Self; T], b: &[Self; T]) -> Self {
        P::sum_of_products(a, b)
    }

    /// Attempts to interpret `bytes` as a field element plus serialization
    /// flags packed into the unused high bits. Returns `None` if the flags
    /// don't fit in one byte, the value is non-canonical, or the flag byte is
    /// invalid for `F`.
    #[inline]
    fn from_random_bytes_with_flags<F: Flags>(bytes: &[u8]) -> Option<(Self, F)> {
        if F::BIT_SIZE > 8 {
            None
        } else {
            let shave_bits = Self::num_bits_to_shave();
            let mut result_bytes = crate::const_helpers::SerBuffer::<N>::zeroed();
            result_bytes.copy_from_u8_slice(bytes);
            // Mask that clears the bits of the top limb above the modulus
            // bit-length (`checked_shr` handles the shave_bits == 64 case).
            let last_limb_mask =
                (u64::MAX.checked_shr(shave_bits as u32).unwrap_or(0)).to_le_bytes();
            let mut last_bytes_mask = [0u8; 9];
            last_bytes_mask[..8].copy_from_slice(&last_limb_mask);

            // Byte length of a serialized element including flag bits, and
            // the position of the byte that carries the flags.
            let output_byte_size = buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE);
            let flag_location = output_byte_size - 1;

            // Index of the flag byte relative to the start of the last limb.
            let flag_location_in_last_limb = flag_location.saturating_sub(8 * (N - 1));

            let last_bytes = result_bytes.last_n_plus_1_bytes_mut();

            // Mask selecting only the top F::BIT_SIZE bits of a byte.
            let flags_mask = u8::MAX.checked_shl(8 - (F::BIT_SIZE as u32)).unwrap_or(0);

            // Extract the flag bits, then clear all shaved bits so the buffer
            // holds only the candidate integer value.
            let mut flags: u8 = 0;
            for (i, (b, m)) in last_bytes.zip(&last_bytes_mask).enumerate() {
                if i == flag_location_in_last_limb {
                    flags = *b & flags_mask
                }
                *b &= m;
            }
            Self::deserialize_compressed(&result_bytes.as_slice()[..(N * 8)])
                .ok()
                .and_then(|f| F::from_u8(flags).map(|flag| (f, flag)))
        }
    }

    /// Returns `self * self` without modifying `self`.
    #[inline]
    fn square(&self) -> Self {
        let mut temp = *self;
        temp.square_in_place();
        temp
    }

    fn square_in_place(&mut self) -> &mut Self {
        P::square_in_place(self);
        self
    }

    /// Multiplicative inverse; `None` for zero.
    #[inline]
    fn inverse(&self) -> Option<Self> {
        P::inverse(self)
    }

    /// Inverts in place, leaving `self` unchanged (and returning `None`) if
    /// it has no inverse.
    fn inverse_in_place(&mut self) -> Option<&mut Self> {
        if let Some(inverse) = self.inverse() {
            *self = inverse;
            Some(self)
        } else {
            None
        }
    }

    /// The Frobenius map `x -> x^p` is the identity in a prime field.
    #[inline]
    fn frobenius_map_in_place(&mut self, _: usize) {}

    /// Legendre symbol via Euler's criterion: `self^((p-1)/2)`.
    #[inline]
    fn legendre(&self) -> LegendreSymbol {
        use crate::fields::LegendreSymbol::*;

        let s = self.pow(Self::MODULUS_MINUS_ONE_DIV_TWO);
        if s.is_zero() {
            Zero
        } else if s.is_one() {
            QuadraticResidue
        } else {
            QuadraticNonResidue
        }
    }

    /// Base-prime-field scaling is ordinary multiplication here.
    #[inline]
    fn mul_by_base_prime_field(&self, elem: &Self::BasePrimeField) -> Self {
        *self * elem
    }
}
344
impl<P: FpConfig<N>, const N: usize> PrimeField for Fp<P, N> {
    type BigInt = BigInt<N>;
    /// The prime modulus `p`.
    const MODULUS: Self::BigInt = P::MODULUS;
    /// `(p - 1) / 2`.
    const MODULUS_MINUS_ONE_DIV_TWO: Self::BigInt = P::MODULUS.divide_by_2_round_down();
    /// Number of bits needed to represent `p`.
    const MODULUS_BIT_SIZE: u32 = P::MODULUS.const_num_bits();
    /// The odd factor `t` of `p - 1 = 2^s * t`.
    const TRACE: Self::BigInt = P::MODULUS.two_adic_coefficient();
    /// `(t - 1) / 2`.
    const TRACE_MINUS_ONE_DIV_TWO: Self::BigInt = Self::TRACE.divide_by_2_round_down();

    /// Constructs an element from a canonical integer; `None` if `r >= p`.
    #[inline]
    fn from_bigint(r: BigInt<N>) -> Option<Self> {
        P::from_bigint(r)
    }

    /// Converts to the canonical integer representation in `0..p`.
    fn into_bigint(self) -> BigInt<N> {
        P::into_bigint(self)
    }
}
362
// All FFT parameters are forwarded directly from the backend configuration.
impl<P: FpConfig<N>, const N: usize> FftField for Fp<P, N> {
    const GENERATOR: Self = P::GENERATOR;
    const TWO_ADICITY: u32 = P::TWO_ADICITY;
    const TWO_ADIC_ROOT_OF_UNITY: Self = P::TWO_ADIC_ROOT_OF_UNITY;
    const SMALL_SUBGROUP_BASE: Option<u32> = P::SMALL_SUBGROUP_BASE;
    const SMALL_SUBGROUP_BASE_ADICITY: Option<u32> = P::SMALL_SUBGROUP_BASE_ADICITY;
    const LARGE_SUBGROUP_ROOT_OF_UNITY: Option<Self> = P::LARGE_SUBGROUP_ROOT_OF_UNITY;
}
371
impl<P: FpConfig<N>, const N: usize> Ord for Fp<P, N> {
    /// Compares elements by their canonical integer values in `0..p`.
    /// This ordering is suitable where *any* total order suffices (e.g. keys
    /// of a `BTreeMap`); it is not a field-semantic ordering, and conversion
    /// to canonical form makes it comparatively expensive.
    #[inline(always)]
    fn cmp(&self, other: &Self) -> Ordering {
        self.into_bigint().cmp(&other.into_bigint())
    }
}
384
impl<P: FpConfig<N>, const N: usize> PartialOrd for Fp<P, N> {
    /// Consistent with [`Ord`]: compares canonical integer representations.
    #[inline(always)]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
397
impl<P: FpConfig<N>, const N: usize> From<u128> for Fp<P, N> {
    /// Reduces a `u128` modulo `p` and maps it into the field.
    fn from(mut other: u128) -> Self {
        let mut result = BigInt::default();
        if N == 1 {
            // Single-limb modulus: widen to u128 for the remainder.
            result.0[0] = (other % u128::from(P::MODULUS.0[0])) as u64;
        } else if N == 2 || P::MODULUS.0[2..].iter().all(|&x| x == 0) {
            // Modulus occupies at most two limbs: reduce in u128 arithmetic,
            // then split the remainder into low/high 64-bit limbs.
            let mod_as_u128 = P::MODULUS.0[0] as u128 + ((P::MODULUS.0[1] as u128) << 64);
            other %= mod_as_u128;
            result.0[0] = ((other << 64) >> 64) as u64;
            result.0[1] = (other >> 64) as u64;
        } else {
            // Modulus is wider than 128 bits, so `other < p` already holds;
            // just split the value into its two low limbs.
            result.0[0] = ((other << 64) >> 64) as u64;
            result.0[1] = (other >> 64) as u64;
        }
        // Cannot fail: all branches above leave `result < p`.
        Self::from_bigint(result).unwrap()
    }
}
415
416impl<P: FpConfig<N>, const N: usize> From<i128> for Fp<P, N> {
417 fn from(other: i128) -> Self {
418 let abs = Self::from(other.unsigned_abs());
419 if other.is_positive() {
420 abs
421 } else {
422 -abs
423 }
424 }
425}
426
427impl<P: FpConfig<N>, const N: usize> From<bool> for Fp<P, N> {
428 fn from(other: bool) -> Self {
429 if N == 1 {
430 Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
431 } else {
432 Self::from_bigint(BigInt::from(u64::from(other))).unwrap()
433 }
434 }
435}
436
437impl<P: FpConfig<N>, const N: usize> From<u64> for Fp<P, N> {
438 fn from(other: u64) -> Self {
439 if N == 1 {
440 Self::from_bigint(BigInt::from(other % P::MODULUS.0[0])).unwrap()
441 } else {
442 Self::from_bigint(BigInt::from(other)).unwrap()
443 }
444 }
445}
446
447impl<P: FpConfig<N>, const N: usize> From<i64> for Fp<P, N> {
448 fn from(other: i64) -> Self {
449 let abs = Self::from(other.unsigned_abs());
450 if other.is_positive() {
451 abs
452 } else {
453 -abs
454 }
455 }
456}
457
458impl<P: FpConfig<N>, const N: usize> From<u32> for Fp<P, N> {
459 fn from(other: u32) -> Self {
460 if N == 1 {
461 Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
462 } else {
463 Self::from_bigint(BigInt::from(other)).unwrap()
464 }
465 }
466}
467
468impl<P: FpConfig<N>, const N: usize> From<i32> for Fp<P, N> {
469 fn from(other: i32) -> Self {
470 let abs = Self::from(other.unsigned_abs());
471 if other.is_positive() {
472 abs
473 } else {
474 -abs
475 }
476 }
477}
478
479impl<P: FpConfig<N>, const N: usize> From<u16> for Fp<P, N> {
480 fn from(other: u16) -> Self {
481 if N == 1 {
482 Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
483 } else {
484 Self::from_bigint(BigInt::from(other)).unwrap()
485 }
486 }
487}
488
489impl<P: FpConfig<N>, const N: usize> From<i16> for Fp<P, N> {
490 fn from(other: i16) -> Self {
491 let abs = Self::from(other.unsigned_abs());
492 if other.is_positive() {
493 abs
494 } else {
495 -abs
496 }
497 }
498}
499
500impl<P: FpConfig<N>, const N: usize> From<u8> for Fp<P, N> {
501 fn from(other: u8) -> Self {
502 if N == 1 {
503 Self::from_bigint(BigInt::from(u64::from(other) % P::MODULUS.0[0])).unwrap()
504 } else {
505 Self::from_bigint(BigInt::from(other)).unwrap()
506 }
507 }
508}
509
510impl<P: FpConfig<N>, const N: usize> From<i8> for Fp<P, N> {
511 fn from(other: i8) -> Self {
512 let abs = Self::from(other.unsigned_abs());
513 if other.is_positive() {
514 abs
515 } else {
516 -abs
517 }
518 }
519}
520
impl<P: FpConfig<N>, const N: usize> ark_std::rand::distributions::Distribution<Fp<P, N>>
    for ark_std::rand::distributions::Standard
{
    /// Samples a uniformly random field element via rejection sampling:
    /// draw random limbs, clear the bits above the modulus bit-length, and
    /// retry until the value is below the modulus.
    #[inline]
    fn sample<R: ark_std::rand::Rng + ?Sized>(&self, rng: &mut R) -> Fp<P, N> {
        loop {
            let mut tmp = Fp(
                rng.sample(ark_std::rand::distributions::Standard),
                PhantomData,
            );
            let shave_bits = Fp::<P, N>::num_bits_to_shave();
            // The masking below only ever touches the top limb.
            assert!(shave_bits <= 64);
            // `u64::MAX >> 64` would be UB, so handle shave_bits == 64 apart.
            let mask = if shave_bits == 64 {
                0
            } else {
                u64::MAX >> shave_bits
            };

            if let Some(val) = tmp.0 .0.last_mut() {
                *val &= mask
            }

            // Accept only canonical (reduced) values; after masking, the
            // candidate is uniform on `0..2^MODULUS_BIT_SIZE`, so each
            // iteration accepts with probability > 1/2.
            if !tmp.is_geq_modulus() {
                return tmp;
            }
        }
    }
}
550
impl<P: FpConfig<N>, const N: usize> CanonicalSerializeWithFlags for Fp<P, N> {
    /// Serializes `self` with `flags` packed into the spare high bits of the
    /// final byte.
    fn serialize_with_flags<W: ark_std::io::Write, F: Flags>(
        &self,
        writer: W,
        flags: F,
    ) -> Result<(), SerializationError> {
        // All reasonable `Flags` should fit within a single byte.
        if F::BIT_SIZE > 8 {
            return Err(SerializationError::NotEnoughSpace);
        }

        // Number of bytes needed for the modulus bits plus the flag bits.
        let output_byte_size = buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE);

        // Write the canonical little-endian limbs into a scratch buffer and
        // OR the flag bits into the last serialized byte.
        let mut bytes = crate::const_helpers::SerBuffer::zeroed();
        bytes.copy_from_u64_slice(&self.into_bigint().0);
        bytes[output_byte_size - 1] |= flags.u8_bitmask();

        bytes.write_up_to(writer, output_byte_size)?;
        Ok(())
    }

    /// Byte length of an element serialized together with flags of type `F`.
    fn serialized_size_with_flags<F: Flags>(&self) -> usize {
        buffer_byte_size(Self::MODULUS_BIT_SIZE as usize + F::BIT_SIZE)
    }
}
588
impl<P: FpConfig<N>, const N: usize> CanonicalSerialize for Fp<P, N> {
    /// Serializes without flag bits; the `compress` mode is irrelevant for
    /// field elements (their encoding is already minimal).
    #[inline]
    fn serialize_with_mode<W: ark_std::io::Write>(
        &self,
        writer: W,
        _compress: Compress,
    ) -> Result<(), SerializationError> {
        self.serialize_with_flags(writer, EmptyFlags)
    }

    #[inline]
    fn serialized_size(&self, _compress: Compress) -> usize {
        self.serialized_size_with_flags::<EmptyFlags>()
    }
}
604
impl<P: FpConfig<N>, const N: usize> CanonicalDeserializeWithFlags for Fp<P, N> {
    /// Reads back an element plus flag bits written by `serialize_with_flags`.
    fn deserialize_with_flags<R: ark_std::io::Read, F: Flags>(
        reader: R,
    ) -> Result<(Self, F), SerializationError> {
        // All reasonable `Flags` should fit within a single byte.
        if F::BIT_SIZE > 8 {
            return Err(SerializationError::NotEnoughSpace);
        }
        // Same length the serializer produced for this flag type.
        let output_byte_size = Self::zero().serialized_size_with_flags::<F>();

        let mut masked_bytes = crate::const_helpers::SerBuffer::zeroed();
        masked_bytes.read_exact_up_to(reader, output_byte_size)?;
        // Strip the flag bits from the final byte before interpreting the
        // buffer as an integer.
        let flags = F::from_u8_remove_flags(&mut masked_bytes[output_byte_size - 1])
            .ok_or(SerializationError::UnexpectedFlags)?;

        let self_integer = masked_bytes.to_bigint();
        // `from_bigint` rejects non-canonical (>= modulus) encodings.
        Self::from_bigint(self_integer)
            .map(|v| (v, flags))
            .ok_or(SerializationError::InvalidData)
    }
}
629
impl<P: FpConfig<N>, const N: usize> Valid for Fp<P, N> {
    /// `Fp` has no invariants beyond what construction already enforces, so
    /// validation always succeeds.
    fn check(&self) -> Result<(), SerializationError> {
        Ok(())
    }
}
635
impl<P: FpConfig<N>, const N: usize> CanonicalDeserialize for Fp<P, N> {
    /// Deserializes an element; compression/validation modes are irrelevant
    /// because the encoding is unique and checked during construction.
    fn deserialize_with_mode<R: ark_std::io::Read>(
        reader: R,
        _compress: Compress,
        _validate: Validate,
    ) -> Result<Self, SerializationError> {
        Self::deserialize_with_flags::<R, EmptyFlags>(reader).map(|(r, _)| r)
    }
}
645
impl<P: FpConfig<N>, const N: usize> FromStr for Fp<P, N> {
    type Err = ();

    /// Parses a (possibly negative) decimal integer string and reduces it
    /// modulo `p`. All failure modes collapse into `Err(())`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        use num_bigint::{BigInt, BigUint};
        use num_traits::Signed;

        let modulus = BigInt::from(P::MODULUS);
        // `%` keeps the dividend's sign, so fix up a negative remainder by
        // adding the modulus once.
        let mut a = BigInt::from_str(s).map_err(|_| ())? % &modulus;
        if a.is_negative() {
            a += modulus
        }
        // BigInt -> BigUint -> BigInt<N> -> Fp, short-circuiting on failure.
        BigUint::try_from(a)
            .map_err(|_| ())
            .and_then(TryFrom::try_from)
            .ok()
            .and_then(Self::from_bigint)
            .ok_or(())
    }
}
668
669impl<P: FpConfig<N>, const N: usize> Display for Fp<P, N> {
672 #[inline]
673 fn fmt(&self, f: &mut Formatter<'_>) -> FmtResult {
674 let string = self.into_bigint().to_string();
675 write!(f, "{}", string)
676 }
677}
678
679impl<P: FpConfig<N>, const N: usize> Neg for Fp<P, N> {
680 type Output = Self;
681 #[inline]
682 #[must_use]
683 fn neg(mut self) -> Self {
684 P::neg_in_place(&mut self);
685 self
686 }
687}
688
689impl<'a, P: FpConfig<N>, const N: usize> Add<&'a Fp<P, N>> for Fp<P, N> {
690 type Output = Self;
691
692 #[inline]
693 fn add(mut self, other: &Self) -> Self {
694 self.add_assign(other);
695 self
696 }
697}
698
699impl<'a, P: FpConfig<N>, const N: usize> Sub<&'a Fp<P, N>> for Fp<P, N> {
700 type Output = Self;
701
702 #[inline]
703 fn sub(mut self, other: &Self) -> Self {
704 self.sub_assign(other);
705 self
706 }
707}
708
709impl<'a, P: FpConfig<N>, const N: usize> Mul<&'a Fp<P, N>> for Fp<P, N> {
710 type Output = Self;
711
712 #[inline]
713 fn mul(mut self, other: &Self) -> Self {
714 self.mul_assign(other);
715 self
716 }
717}
718
719impl<'a, P: FpConfig<N>, const N: usize> Div<&'a Fp<P, N>> for Fp<P, N> {
720 type Output = Self;
721
722 #[inline]
725 fn div(mut self, other: &Self) -> Self {
726 self.mul_assign(&other.inverse().unwrap());
727 self
728 }
729}
730
731impl<'a, 'b, P: FpConfig<N>, const N: usize> Add<&'b Fp<P, N>> for &'a Fp<P, N> {
732 type Output = Fp<P, N>;
733
734 #[inline]
735 fn add(self, other: &'b Fp<P, N>) -> Fp<P, N> {
736 let mut result = *self;
737 result.add_assign(other);
738 result
739 }
740}
741
742impl<'a, 'b, P: FpConfig<N>, const N: usize> Sub<&'b Fp<P, N>> for &'a Fp<P, N> {
743 type Output = Fp<P, N>;
744
745 #[inline]
746 fn sub(self, other: &Fp<P, N>) -> Fp<P, N> {
747 let mut result = *self;
748 result.sub_assign(other);
749 result
750 }
751}
752
753impl<'a, 'b, P: FpConfig<N>, const N: usize> Mul<&'b Fp<P, N>> for &'a Fp<P, N> {
754 type Output = Fp<P, N>;
755
756 #[inline]
757 fn mul(self, other: &Fp<P, N>) -> Fp<P, N> {
758 let mut result = *self;
759 result.mul_assign(other);
760 result
761 }
762}
763
764impl<'a, 'b, P: FpConfig<N>, const N: usize> Div<&'b Fp<P, N>> for &'a Fp<P, N> {
765 type Output = Fp<P, N>;
766
767 #[inline]
768 fn div(self, other: &Fp<P, N>) -> Fp<P, N> {
769 let mut result = *self;
770 result.div_assign(other);
771 result
772 }
773}
774
impl<'a, P: FpConfig<N>, const N: usize> AddAssign<&'a Self> for Fp<P, N> {
    /// In-place modular addition, delegated to the backend `P`.
    #[inline]
    fn add_assign(&mut self, other: &Self) {
        P::add_assign(self, other)
    }
}
781
impl<'a, P: FpConfig<N>, const N: usize> SubAssign<&'a Self> for Fp<P, N> {
    /// In-place modular subtraction, delegated to the backend `P`.
    #[inline]
    fn sub_assign(&mut self, other: &Self) {
        P::sub_assign(self, other);
    }
}
788
789#[allow(unused_qualifications)]
790impl<P: FpConfig<N>, const N: usize> core::ops::Add<Self> for Fp<P, N> {
791 type Output = Self;
792
793 #[inline]
794 fn add(mut self, other: Self) -> Self {
795 self.add_assign(&other);
796 self
797 }
798}
799
800#[allow(unused_qualifications)]
801impl<'a, P: FpConfig<N>, const N: usize> core::ops::Add<&'a mut Self> for Fp<P, N> {
802 type Output = Self;
803
804 #[inline]
805 fn add(mut self, other: &'a mut Self) -> Self {
806 self.add_assign(&*other);
807 self
808 }
809}
810
811#[allow(unused_qualifications)]
812impl<P: FpConfig<N>, const N: usize> core::ops::Sub<Self> for Fp<P, N> {
813 type Output = Self;
814
815 #[inline]
816 fn sub(mut self, other: Self) -> Self {
817 self.sub_assign(&other);
818 self
819 }
820}
821
822#[allow(unused_qualifications)]
823impl<'a, P: FpConfig<N>, const N: usize> core::ops::Sub<&'a mut Self> for Fp<P, N> {
824 type Output = Self;
825
826 #[inline]
827 fn sub(mut self, other: &'a mut Self) -> Self {
828 self.sub_assign(&*other);
829 self
830 }
831}
832
833#[allow(unused_qualifications)]
834impl<P: FpConfig<N>, const N: usize> core::iter::Sum<Self> for Fp<P, N> {
835 fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
836 iter.fold(Self::zero(), core::ops::Add::add)
837 }
838}
839
840#[allow(unused_qualifications)]
841impl<'a, P: FpConfig<N>, const N: usize> core::iter::Sum<&'a Self> for Fp<P, N> {
842 fn sum<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
843 iter.fold(Self::zero(), core::ops::Add::add)
844 }
845}
846
847#[allow(unused_qualifications)]
848impl<P: FpConfig<N>, const N: usize> core::ops::AddAssign<Self> for Fp<P, N> {
849 #[inline(always)]
850 fn add_assign(&mut self, other: Self) {
851 self.add_assign(&other)
852 }
853}
854
855#[allow(unused_qualifications)]
856impl<P: FpConfig<N>, const N: usize> core::ops::SubAssign<Self> for Fp<P, N> {
857 #[inline(always)]
858 fn sub_assign(&mut self, other: Self) {
859 self.sub_assign(&other)
860 }
861}
862
863#[allow(unused_qualifications)]
864impl<'a, P: FpConfig<N>, const N: usize> core::ops::AddAssign<&'a mut Self> for Fp<P, N> {
865 #[inline(always)]
866 fn add_assign(&mut self, other: &'a mut Self) {
867 self.add_assign(&*other)
868 }
869}
870
871#[allow(unused_qualifications)]
872impl<'a, P: FpConfig<N>, const N: usize> core::ops::SubAssign<&'a mut Self> for Fp<P, N> {
873 #[inline(always)]
874 fn sub_assign(&mut self, other: &'a mut Self) {
875 self.sub_assign(&*other)
876 }
877}
878
impl<'a, P: FpConfig<N>, const N: usize> MulAssign<&'a Self> for Fp<P, N> {
    /// In-place modular multiplication, delegated to the backend `P`.
    fn mul_assign(&mut self, other: &Self) {
        P::mul_assign(self, other)
    }
}
884
885impl<'a, P: FpConfig<N>, const N: usize> DivAssign<&'a Self> for Fp<P, N> {
888 #[inline(always)]
889 fn div_assign(&mut self, other: &Self) {
890 self.mul_assign(&other.inverse().unwrap());
891 }
892}
893
894#[allow(unused_qualifications)]
895impl<P: FpConfig<N>, const N: usize> core::ops::Mul<Self> for Fp<P, N> {
896 type Output = Self;
897
898 #[inline(always)]
899 fn mul(mut self, other: Self) -> Self {
900 self.mul_assign(&other);
901 self
902 }
903}
904
905#[allow(unused_qualifications)]
906impl<P: FpConfig<N>, const N: usize> core::ops::Div<Self> for Fp<P, N> {
907 type Output = Self;
908
909 #[inline(always)]
910 fn div(mut self, other: Self) -> Self {
911 self.div_assign(&other);
912 self
913 }
914}
915
916#[allow(unused_qualifications)]
917impl<'a, P: FpConfig<N>, const N: usize> core::ops::Mul<&'a mut Self> for Fp<P, N> {
918 type Output = Self;
919
920 #[inline(always)]
921 fn mul(mut self, other: &'a mut Self) -> Self {
922 self.mul_assign(&*other);
923 self
924 }
925}
926
927#[allow(unused_qualifications)]
928impl<'a, P: FpConfig<N>, const N: usize> core::ops::Div<&'a mut Self> for Fp<P, N> {
929 type Output = Self;
930
931 #[inline(always)]
932 fn div(mut self, other: &'a mut Self) -> Self {
933 self.div_assign(&*other);
934 self
935 }
936}
937
938#[allow(unused_qualifications)]
939impl<P: FpConfig<N>, const N: usize> core::iter::Product<Self> for Fp<P, N> {
940 fn product<I: Iterator<Item = Self>>(iter: I) -> Self {
941 iter.fold(Self::one(), core::ops::Mul::mul)
942 }
943}
944
945#[allow(unused_qualifications)]
946impl<'a, P: FpConfig<N>, const N: usize> core::iter::Product<&'a Self> for Fp<P, N> {
947 fn product<I: Iterator<Item = &'a Self>>(iter: I) -> Self {
948 iter.fold(Self::one(), Mul::mul)
949 }
950}
951
952#[allow(unused_qualifications)]
953impl<P: FpConfig<N>, const N: usize> core::ops::MulAssign<Self> for Fp<P, N> {
954 #[inline(always)]
955 fn mul_assign(&mut self, other: Self) {
956 self.mul_assign(&other)
957 }
958}
959
960#[allow(unused_qualifications)]
961impl<'a, P: FpConfig<N>, const N: usize> core::ops::DivAssign<&'a mut Self> for Fp<P, N> {
962 #[inline(always)]
963 fn div_assign(&mut self, other: &'a mut Self) {
964 self.div_assign(&*other)
965 }
966}
967
968#[allow(unused_qualifications)]
969impl<'a, P: FpConfig<N>, const N: usize> core::ops::MulAssign<&'a mut Self> for Fp<P, N> {
970 #[inline(always)]
971 fn mul_assign(&mut self, other: &'a mut Self) {
972 self.mul_assign(&*other)
973 }
974}
975
976#[allow(unused_qualifications)]
977impl<P: FpConfig<N>, const N: usize> core::ops::DivAssign<Self> for Fp<P, N> {
978 #[inline(always)]
979 fn div_assign(&mut self, other: Self) {
980 self.div_assign(&other)
981 }
982}
983
impl<P: FpConfig<N>, const N: usize> zeroize::Zeroize for Fp<P, N> {
    /// Clears the limb representation. Note: the all-zero limb pattern is a
    /// valid element (zero), so this cannot leave the value malformed.
    fn zeroize(&mut self) {
        self.0.zeroize();
    }
}
991
impl<P: FpConfig<N>, const N: usize> From<num_bigint::BigUint> for Fp<P, N> {
    /// Reduces an arbitrary-precision unsigned integer modulo `p`.
    #[inline]
    fn from(val: num_bigint::BigUint) -> Fp<P, N> {
        Fp::<P, N>::from_le_bytes_mod_order(&val.to_bytes_le())
    }
}
998
999impl<P: FpConfig<N>, const N: usize> From<Fp<P, N>> for num_bigint::BigUint {
1000 #[inline(always)]
1001 fn from(other: Fp<P, N>) -> Self {
1002 other.into_bigint().into()
1003 }
1004}
1005
1006impl<P: FpConfig<N>, const N: usize> From<Fp<P, N>> for BigInt<N> {
1007 #[inline(always)]
1008 fn from(fp: Fp<P, N>) -> Self {
1009 fp.into_bigint()
1010 }
1011}
1012
1013impl<P: FpConfig<N>, const N: usize> From<BigInt<N>> for Fp<P, N> {
1014 #[inline(always)]
1016 fn from(int: BigInt<N>) -> Self {
1017 Self::from_bigint(int).unwrap()
1018 }
1019}