1#![no_std]
2#![cfg_attr(docsrs, feature(doc_cfg))]
3#![doc = include_str!("../README.md")]
4#![doc(
5 html_logo_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg",
6 html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/meta/master/logo.svg"
7)]
8#![allow(clippy::undocumented_unsafe_blocks)] #![warn(
10 clippy::arithmetic_side_effects,
11 clippy::integer_division_remainder_used,
12 clippy::panic
13)]
14
15#[macro_use]
16mod macros;
17
18mod array;
19mod backends;
20mod slice;
21
22use core::{
23 cmp,
24 num::{
25 NonZeroI8, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI128, NonZeroIsize, NonZeroU8,
26 NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU128, NonZeroUsize,
27 },
28};
29
/// Condition operand for conditional moves and comparisons: zero means
/// "false", any non-zero value means "true" (see `cmovz` / `cmovnz`).
pub type Condition = u8;
36
/// Conditional move trait: move `value` into `self` depending on
/// `condition`, in constant time (no branching on `condition`).
pub trait Cmov {
    /// Move `value` into `self` if `condition` is non-zero; otherwise
    /// leave `self` unchanged.
    fn cmovnz(&mut self, value: &Self, condition: Condition);

    /// Move `value` into `self` if `condition` is zero; otherwise leave
    /// `self` unchanged.
    fn cmovz(&mut self, value: &Self, condition: Condition) {
        // NOTE(review): `masknz!` is defined in `mod macros` (not visible
        // here); presumably it expands to an all-ones byte when `condition`
        // is non-zero and zero otherwise, so `!nz` inverts the test and
        // `cmovnz` performs the "move on zero" — TODO confirm against
        // `macros.rs`.
        let nz = masknz!(condition: Condition);
        self.cmovnz(value, !nz);
    }
}
52
/// Conditional equality test trait: compare `self` and `rhs` in constant
/// time, moving `input` into `output` depending on the outcome.
///
/// The two methods have default implementations written in terms of each
/// other, so implementors must override at least one of them (otherwise
/// the defaults recurse forever).
pub trait CmovEq {
    /// Move `input` into `output` if `self != rhs`; otherwise leave
    /// `output` unchanged.
    fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
        // `tmp` starts at 1 and is cleared to 0 when the operands are
        // equal, so afterwards `tmp == 1` iff `self != rhs`.
        let mut tmp = 1u8;
        self.cmoveq(rhs, 0u8, &mut tmp);
        // Propagate `input` only in the not-equal case (uses `u8`'s
        // concrete `cmoveq`, so no recursion through this trait's default).
        tmp.cmoveq(&1u8, input, output);
    }

    /// Move `input` into `output` if `self == rhs`; otherwise leave
    /// `output` unchanged.
    fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
        // Mirror image of the default above: `tmp` is cleared to 0 when
        // the operands differ, so `tmp == 1` iff `self == rhs`.
        let mut tmp = 1u8;
        self.cmovne(rhs, 0u8, &mut tmp);
        tmp.cmoveq(&1, input, output);
    }
}
73
74impl Cmov for u8 {
75 #[inline]
76 fn cmovnz(&mut self, value: &Self, condition: Condition) {
77 let mut tmp = u16::from(*self);
78 tmp.cmovnz(&u16::from(*value), condition);
79 debug_assert!(u8::try_from(tmp).is_ok());
80 *self = (tmp & 0xFF) as u8;
81 }
82
83 #[inline]
84 fn cmovz(&mut self, value: &Self, condition: Condition) {
85 let mut tmp = u16::from(*self);
86 tmp.cmovz(&u16::from(*value), condition);
87 debug_assert!(u8::try_from(tmp).is_ok());
88 *self = (tmp & 0xFF) as u8;
89 }
90}
91
92impl Cmov for u128 {
93 #[inline]
94 fn cmovnz(&mut self, value: &Self, condition: Condition) {
95 let mut lo = (*self & 0xFFFF_FFFF_FFFF_FFFF) as u64;
96 let mut hi = (*self >> 64) as u64;
97
98 lo.cmovnz(&((*value & 0xFFFF_FFFF_FFFF_FFFF) as u64), condition);
99 hi.cmovnz(&((*value >> 64) as u64), condition);
100
101 *self = u128::from(lo) | (u128::from(hi) << 64);
102 }
103
104 #[inline]
105 fn cmovz(&mut self, value: &Self, condition: Condition) {
106 let mut lo = (*self & 0xFFFF_FFFF_FFFF_FFFF) as u64;
107 let mut hi = (*self >> 64) as u64;
108
109 lo.cmovz(&((*value & 0xFFFF_FFFF_FFFF_FFFF) as u64), condition);
110 hi.cmovz(&((*value >> 64) as u64), condition);
111
112 *self = u128::from(lo) | (u128::from(hi) << 64);
113 }
114}
115
116impl CmovEq for u8 {
117 #[inline]
118 fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
119 u16::from(*self).cmoveq(&u16::from(*rhs), input, output);
120 }
121
122 #[inline]
123 fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
124 u16::from(*self).cmovne(&u16::from(*rhs), input, output);
125 }
126}
127
128impl CmovEq for u128 {
129 #[inline]
130 fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
131 let lo = (*self & 0xFFFF_FFFF_FFFF_FFFF) as u64;
132 let hi = (*self >> 64) as u64;
133
134 let mut tmp = 1u8;
135 lo.cmovne(&((*rhs & 0xFFFF_FFFF_FFFF_FFFF) as u64), 0, &mut tmp);
136 hi.cmovne(&((*rhs >> 64) as u64), 0, &mut tmp);
137 tmp.cmoveq(&0, input, output);
138 }
139
140 #[inline]
141 fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
142 let lo = (*self & 0xFFFF_FFFF_FFFF_FFFF) as u64;
143 let hi = (*self >> 64) as u64;
144
145 let mut tmp = 1u8;
146 lo.cmovne(&((*rhs & 0xFFFF_FFFF_FFFF_FFFF) as u64), 0, &mut tmp);
147 hi.cmovne(&((*rhs >> 64) as u64), 0, &mut tmp);
148 tmp.cmoveq(&1, input, output);
149 }
150}
151
/// Implement [`Cmov`] and [`CmovEq`] for a signed integer type by
/// bit-casting to the unsigned type of the same width and delegating.
/// Two's complement guarantees the round-trip cast is lossless.
macro_rules! impl_cmov_traits_for_signed_ints {
    ( $($signed:ty => $unsigned:ty),+ ) => {
        $(
            impl Cmov for $signed {
                #[inline]
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                fn cmovnz(&mut self, value: &Self, condition: Condition) {
                    // Operate on the raw bit pattern, then cast back.
                    let mut bits = *self as $unsigned;
                    bits.cmovnz(&(*value as $unsigned), condition);
                    *self = bits as $signed;
                }

                #[inline]
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                fn cmovz(&mut self, value: &Self, condition: Condition) {
                    let mut bits = *self as $unsigned;
                    bits.cmovz(&(*value as $unsigned), condition);
                    *self = bits as $signed;
                }
            }

            impl CmovEq for $signed {
                #[inline]
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                    // Equality of bit patterns is equality of values.
                    (*self as $unsigned).cmoveq(&(*rhs as $unsigned), input, output);
                }

                #[inline]
                #[allow(clippy::cast_possible_wrap, clippy::cast_sign_loss)]
                fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                    (*self as $unsigned).cmovne(&(*rhs as $unsigned), input, output);
                }
            }
        )+
    };
}
191
// Signed integers delegate to their same-width unsigned counterparts.
impl_cmov_traits_for_signed_ints!(
    i8 => u8,
    i16 => u16,
    i32 => u32,
    i64 => u64,
    i128 => u128
);
199
/// Implement [`Cmov`] and [`CmovEq`] for a pointer-sized integer type
/// (`usize`/`isize`) by delegating to the fixed-width integer matching the
/// target's pointer width (selected at compile time via `cfg`).
///
/// Only `cmovnz` and `cmovne` are provided per width; `cmovz` and `cmoveq`
/// come from the traits' default implementations. Targets with pointer
/// widths other than 16/32/64 get no impl at all.
macro_rules! impl_cmov_traits_for_size_int {
    ($size:ty, $int16:ty, $int32:ty, $int64:ty) => {
        #[cfg(any(
            target_pointer_width = "16",
            target_pointer_width = "32",
            target_pointer_width = "64"
        ))]
        #[cfg_attr(docsrs, doc(cfg(true)))]
        #[allow(clippy::cast_possible_truncation)]
        impl Cmov for $size {
            // Exactly one of the following bodies is compiled per target.
            #[cfg(target_pointer_width = "16")]
            #[inline]
            fn cmovnz(&mut self, other: &Self, condition: Condition) {
                let mut tmp = *self as $int16;
                tmp.cmovnz(&(*other as $int16), condition);
                *self = tmp as $size;
            }

            #[cfg(target_pointer_width = "32")]
            #[inline]
            fn cmovnz(&mut self, other: &Self, condition: Condition) {
                let mut tmp = *self as $int32;
                tmp.cmovnz(&(*other as $int32), condition);
                *self = tmp as $size;
            }

            #[cfg(target_pointer_width = "64")]
            #[inline]
            fn cmovnz(&mut self, other: &Self, condition: Condition) {
                let mut tmp = *self as $int64;
                tmp.cmovnz(&(*other as $int64), condition);
                *self = tmp as $size;
            }
        }

        #[cfg(any(
            target_pointer_width = "16",
            target_pointer_width = "32",
            target_pointer_width = "64"
        ))]
        #[cfg_attr(docsrs, doc(cfg(true)))]
        #[allow(clippy::cast_possible_truncation)]
        impl CmovEq for $size {
            // As above: one body per pointer width.
            #[cfg(target_pointer_width = "16")]
            #[inline]
            fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                (*self as $int16).cmovne(&(*rhs as $int16), input, output);
            }

            #[cfg(target_pointer_width = "32")]
            #[inline]
            fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                (*self as $int32).cmovne(&(*rhs as $int32), input, output);
            }

            #[cfg(target_pointer_width = "64")]
            #[inline]
            fn cmovne(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                (*self as $int64).cmovne(&(*rhs as $int64), input, output);
            }
        }
    };
}
263
// Pointer-sized integers delegate to the fixed-width type matching the
// target's pointer width (16/32/64 bits).
impl_cmov_traits_for_size_int!(isize, i16, i32, i64);
impl_cmov_traits_for_size_int!(usize, u16, u32, u64);
266
/// Implement [`Cmov`] and [`CmovEq`] for a `NonZero*` integer type by
/// operating on the underlying primitive via `get()`.
///
/// `cmovz` and `cmovne` are supplied by the traits' default
/// implementations.
macro_rules! impl_cmov_traits_for_nonzero_integers {
    ( $($nzint:ident),+ ) => {
        $(
            impl Cmov for $nzint {
                #[inline]
                fn cmovnz(&mut self, src: &Self, condition: Condition) {
                    let mut n = self.get();
                    n.cmovnz(&src.get(), condition);

                    // SAFETY: `n` is either `self.get()` or `src.get()`,
                    // both of which come from `NonZero*` values and are
                    // therefore guaranteed non-zero.
                    #[allow(unsafe_code)]
                    unsafe { *self = $nzint::new_unchecked(n) }
                }
            }

            impl CmovEq for $nzint {
                #[inline]
                fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
                    // Two `NonZero*` values are equal iff their primitive
                    // values are equal.
                    self.get().cmoveq(&rhs.get(), input, output);
                }
            }
        )+
    };
}
293
// All `core::num::NonZero*` types delegate to their primitive integer.
impl_cmov_traits_for_nonzero_integers!(
    NonZeroI8,
    NonZeroI16,
    NonZeroI32,
    NonZeroI64,
    NonZeroI128,
    NonZeroIsize,
    NonZeroU8,
    NonZeroU16,
    NonZeroU32,
    NonZeroU64,
    NonZeroU128,
    NonZeroUsize
);
308
impl Cmov for cmp::Ordering {
    /// Conditionally move one `Ordering` into another by operating on its
    /// `i8` discriminant (`Ordering` is documented as `#[repr(i8)]` with
    /// discriminants -1, 0, and 1).
    #[inline]
    fn cmovnz(&mut self, src: &Self, condition: Condition) {
        let mut n = *self as i8;
        n.cmovnz(&(*src as i8), condition);

        // SAFETY: after the move, `n` holds either `*self as i8` or
        // `*src as i8` — i.e. the discriminant of an existing `Ordering`
        // value — so reinterpreting it as `Ordering` is sound.
        #[allow(trivial_casts, unsafe_code)]
        unsafe {
            *self = *(&raw const n).cast::<Self>();
        }
    }
}
331
332impl CmovEq for cmp::Ordering {
333 #[inline]
334 fn cmoveq(&self, rhs: &Self, input: Condition, output: &mut Condition) {
335 (*self as i8).cmoveq(&(*rhs as i8), input, output);
336 }
337}