// cmov: RustCrypto constant-time conditional move crate — src/lib.rs
1#![no_std]
2#![cfg_attr(docsrs, feature(doc_cfg))]
3#![doc = include_str!("../README.md")]
4#![doc(
5    html_logo_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg",
6    html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg"
7)]
8#![warn(
9    clippy::cast_lossless,
10    clippy::cast_possible_truncation,
11    clippy::cast_precision_loss,
12    clippy::checked_conversions,
13    clippy::implicit_saturating_sub,
14    clippy::integer_division_remainder_used,
15    clippy::mod_module_files,
16    clippy::panic,
17    clippy::panic_in_result_fn,
18    clippy::ref_as_ptr,
19    clippy::return_self_not_must_use,
20    clippy::semicolon_if_nothing_returned,
21    clippy::std_instead_of_alloc,
22    clippy::std_instead_of_core,
23    clippy::unwrap_used,
24    missing_copy_implementations,
25    missing_debug_implementations,
26    missing_docs,
27    rust_2018_idioms,
28    trivial_casts,
29    trivial_numeric_casts,
30    unused_lifetimes,
31    unused_qualifications
32)]
33
34#[macro_use]
35mod macros;
36
37mod array;
38mod backends;
39mod slice;
40
41use core::{
42    cmp,
43    num::{
44        NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI128, NonZeroU8, NonZeroU16,
45        NonZeroU32, NonZeroU64, NonZeroU128,
46    },
47};
48
/// Condition: the argument given to [`Cmov`] and [`CmovEq`] representing an effective boolean
/// condition by virtue of being zero (falsy) or non-zero (truthy).
///
/// Using a `u8` for this type helps prevent rustc optimizers from speculating about it as if it
/// were a boolean value.
pub type Condition = u8;
55
56/// Conditional move
57pub trait Cmov {
58    /// Move if non-zero.
59    ///
60    /// Moves `value` to `self` in constant-time if `condition` is non-zero.
61    fn cmovnz(&mut self, value: &Self, condition: Condition);
62
63    /// Move if zero.
64    ///
65    /// Moves `value` to `self` in constant-time if `condition` is equal to zero.
66    fn cmovz(&mut self, value: &Self, condition: Condition) {
67        let nz = masknz!(condition: Condition);
68        self.cmovnz(value, !nz);
69    }
70}
71
72/// Conditional move with equality comparison
73pub trait CmovEq {
74    /// Move if both inputs are not equal.
75    ///
76    /// Moves `input` to `output` in constant-time if `self` and `rhs` are NOT equal.
77    fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
78        let mut tmp = 1u8;
79        self.cmoveq(rhs, 0u8, &mut tmp);
80        tmp.cmoveq(&1u8, input, output);
81    }
82
83    /// Move if both inputs are equal.
84    ///
85    /// Moves `input` to `output` in constant-time if `self` and `rhs` are equal.
86    fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
87        let mut tmp = 1u8;
88        self.cmovne(rhs, 0u8, &mut tmp);
89        tmp.cmoveq(&1, input, output);
90    }
91}
92
93impl Cmov for u8 {
94    #[inline]
95    fn cmovnz(&mut self, value: &Self, condition: Condition) {
96        let mut tmp = u16::from(*self);
97        tmp.cmovnz(&u16::from(*value), condition);
98        debug_assert!(u8::try_from(tmp).is_ok());
99        *self = (tmp & 0xFF) as u8;
100    }
101
102    #[inline]
103    fn cmovz(&mut self, value: &Self, condition: Condition) {
104        let mut tmp = u16::from(*self);
105        tmp.cmovz(&u16::from(*value), condition);
106        debug_assert!(u8::try_from(tmp).is_ok());
107        *self = (tmp & 0xFF) as u8;
108    }
109}
110
111impl Cmov for u128 {
112    #[inline]
113    fn cmovnz(&mut self, value: &Self, condition: Condition) {
114        let mut lo = (*self & 0xFFFF_FFFF_FFFF_FFFF) as u64;
115        let mut hi = (*self >> 64) as u64;
116
117        lo.cmovnz(&((*value & 0xFFFF_FFFF_FFFF_FFFF) as u64), condition);
118        hi.cmovnz(&((*value >> 64) as u64), condition);
119
120        *self = u128::from(lo) | (u128::from(hi) << 64);
121    }
122
123    #[inline]
124    fn cmovz(&mut self, value: &Self, condition: Condition) {
125        let mut lo = (*self & 0xFFFF_FFFF_FFFF_FFFF) as u64;
126        let mut hi = (*self >> 64) as u64;
127
128        lo.cmovz(&((*value & 0xFFFF_FFFF_FFFF_FFFF) as u64), condition);
129        hi.cmovz(&((*value >> 64) as u64), condition);
130
131        *self = u128::from(lo) | (u128::from(hi) << 64);
132    }
133}
134
135impl CmovEq for u8 {
136    #[inline]
137    fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
138        u16::from(*self).cmoveq(&u16::from(*rhs), input, output);
139    }
140
141    #[inline]
142    fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
143        u16::from(*self).cmovne(&u16::from(*rhs), input, output);
144    }
145}
146
147impl CmovEq for u128 {
148    #[inline]
149    fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
150        let lo = (*self & 0xFFFF_FFFF_FFFF_FFFF) as u64;
151        let hi = (*self >> 64) as u64;
152
153        let mut tmp = 1u8;
154        lo.cmovne(&((*rhs & 0xFFFF_FFFF_FFFF_FFFF) as u64), 0, &mut tmp);
155        hi.cmovne(&((*rhs >> 64) as u64), 0, &mut tmp);
156        tmp.cmoveq(&0, input, output);
157    }
158
159    #[inline]
160    fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
161        let lo = (*self & 0xFFFF_FFFF_FFFF_FFFF) as u64;
162        let hi = (*self >> 64) as u64;
163
164        let mut tmp = 1u8;
165        lo.cmovne(&((*rhs & 0xFFFF_FFFF_FFFF_FFFF) as u64), 0, &mut tmp);
166        hi.cmovne(&((*rhs >> 64) as u64), 0, &mut tmp);
167        tmp.cmoveq(&1, input, output);
168    }
169}
170
/// Impl `Cmov*` by first casting to unsigned then using the unsigned `Cmov` impls
// TODO(tarcieri): use `cast_unsigned`/`cast_signed` to get rid of the `=> u*`
macro_rules! impl_cmov_traits_for_signed_ints {
    ( $($signed:ty => $unsigned:ty),+ ) => {
        $(
            impl Cmov for $signed {
                #[inline]
                fn cmovnz(&mut self, value: &Self, condition: Condition) {
                    // Bit-preserving round trip through the same-width
                    // unsigned type, whose constant-time impl does the work.
                    let mut bits = *self as $unsigned;
                    bits.cmovnz(&(*value as $unsigned), condition);
                    *self = bits as $signed;
                }

                #[inline]
                fn cmovz(&mut self, value: &Self, condition: Condition) {
                    let mut bits = *self as $unsigned;
                    bits.cmovz(&(*value as $unsigned), condition);
                    *self = bits as $signed;
                }
            }

            impl CmovEq for $signed {
                #[inline]
                fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                    (*self as $unsigned).cmoveq(&(*rhs as $unsigned), input, output);
                }

                #[inline]
                fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                    (*self as $unsigned).cmovne(&(*rhs as $unsigned), input, output);
                }
            }
        )+
    };
}
206
207impl_cmov_traits_for_signed_ints!(
208    i8 => u8,
209    i16 => u16,
210    i32 => u32,
211    i64 => u64,
212    i128 => u128
213);
214
/// Impl `Cmov`/`CmovEq` for the pointer-sized integer type `$size` by delegating to the
/// fixed-width integer type that matches the target's pointer width.
///
/// Only 16-, 32-, and 64-bit targets are supported: each method has one body per
/// `target_pointer_width`, and no impl is emitted for other widths.
// NOTE: parameter renamed from `other` to `value` for consistency with the `Cmov` trait
// declaration and every other impl in this file; impl parameter names are not part of
// the public interface, so this is caller-invisible.
macro_rules! impl_cmov_traits_for_size_int {
    ($size:ty, $int16:ty, $int32:ty, $int64:ty) => {
        #[cfg(any(
            target_pointer_width = "16",
            target_pointer_width = "32",
            target_pointer_width = "64"
        ))]
        #[cfg_attr(docsrs, doc(cfg(true)))]
        #[allow(clippy::cast_possible_truncation)]
        impl Cmov for $size {
            #[cfg(target_pointer_width = "16")]
            #[inline]
            fn cmovnz(&mut self, value: &Self, condition: Condition) {
                // On 16-bit targets, round-trip through the 16-bit integer impl.
                let mut tmp = *self as $int16;
                tmp.cmovnz(&(*value as $int16), condition);
                *self = tmp as $size;
            }

            #[cfg(target_pointer_width = "32")]
            #[inline]
            fn cmovnz(&mut self, value: &Self, condition: Condition) {
                // On 32-bit targets, round-trip through the 32-bit integer impl.
                let mut tmp = *self as $int32;
                tmp.cmovnz(&(*value as $int32), condition);
                *self = tmp as $size;
            }

            #[cfg(target_pointer_width = "64")]
            #[inline]
            fn cmovnz(&mut self, value: &Self, condition: Condition) {
                // On 64-bit targets, round-trip through the 64-bit integer impl.
                let mut tmp = *self as $int64;
                tmp.cmovnz(&(*value as $int64), condition);
                *self = tmp as $size;
            }
        }

        #[cfg(any(
            target_pointer_width = "16",
            target_pointer_width = "32",
            target_pointer_width = "64"
        ))]
        #[cfg_attr(docsrs, doc(cfg(true)))]
        #[allow(clippy::cast_possible_truncation)]
        impl CmovEq for $size {
            #[cfg(target_pointer_width = "16")]
            #[inline]
            fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                (*self as $int16).cmovne(&(*rhs as $int16), input, output);
            }

            #[cfg(target_pointer_width = "32")]
            #[inline]
            fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                (*self as $int32).cmovne(&(*rhs as $int32), input, output);
            }

            #[cfg(target_pointer_width = "64")]
            #[inline]
            fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                (*self as $int64).cmovne(&(*rhs as $int64), input, output);
            }
        }
    };
}
278
279impl_cmov_traits_for_size_int!(isize, i16, i32, i64);
280impl_cmov_traits_for_size_int!(usize, u16, u32, u64);
281
/// Impl `Cmov`/`CmovEq` for `NonZero*` integer types by calling the corresponding impl
/// on the inner primitive integer obtained via `NonZero::get`.
// NOTE: parameter renamed from `src` to `value` for consistency with the `Cmov` trait
// declaration and every other impl in this file (caller-invisible change); stray extra
// indentation on the `impl Cmov` line also fixed.
macro_rules! impl_cmov_traits_for_nonzero_integers {
    ( $($nzint:ident),+ ) => {
        $(
            impl Cmov for $nzint {
                #[inline]
                fn cmovnz(&mut self, value: &Self, condition: Condition) {
                    let mut n = self.get();
                    n.cmovnz(&value.get(), condition);

                    // SAFETY: `n` is either `self.get()` or `value.get()` after the
                    // conditional move; both were obtained from `NonZero::get`, so `n`
                    // is non-zero in either case.
                    #[allow(unsafe_code)]
                    unsafe { *self = $nzint::new_unchecked(n) }
                }
            }

            impl CmovEq for $nzint {
                #[inline]
                fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                    // `cmovne` comes from the trait's default, which inverts `cmoveq`.
                    self.get().cmoveq(&rhs.get(), input, output);
                }
            }
        )+
    };
}
308
// Blanket `Cmov`/`CmovEq` support for all fixed-width `NonZero*` integer types
// from `core::num` (pointer-sized `NonZero` types are not covered here).
impl_cmov_traits_for_nonzero_integers!(
    NonZeroI8,
    NonZeroI16,
    NonZeroI32,
    NonZeroI64,
    NonZeroI128,
    NonZeroU8,
    NonZeroU16,
    NonZeroU32,
    NonZeroU64,
    NonZeroU128
);
321
impl Cmov for cmp::Ordering {
    #[inline]
    fn cmovnz(&mut self, src: &Self, condition: Condition) {
        // `Ordering` is `#[repr(i8)]` where:
        //
        // - `Less` => -1
        // - `Equal` => 0
        // - `Greater` => 1
        //
        // Given this, it's possible to operate on orderings as if they're `i8`, which allows us to
        // use the `Cmov` impl on `i8` to select between them.
        let mut n = *self as i8;
        n.cmovnz(&(*src as i8), condition);

        // SAFETY: `Ordering` is `#[repr(i8)]` and `n` holds one of the two `i8`
        // values produced above by casting valid `Ordering`s, so reading it back
        // as `Ordering` through the pointer cast is sound.
        #[allow(trivial_casts, unsafe_code)]
        unsafe {
            *self = *(&raw const n).cast::<Self>();
        }
    }
}
344
345impl CmovEq for cmp::Ordering {
346    #[inline]
347    fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
348        (*self as i8).cmoveq(&(*rhs as i8), input, output);
349    }
350}