blake3/lib.rs

1//! The official Rust implementation of the [BLAKE3] cryptographic hash
2//! function.
3//!
4//! # Examples
5//!
6//! ```
7//! # fn main() -> Result<(), Box<dyn std::error::Error>> {
8//! // Hash an input all at once.
9//! let hash1 = blake3::hash(b"foobarbaz");
10//!
11//! // Hash an input incrementally.
12//! let mut hasher = blake3::Hasher::new();
13//! hasher.update(b"foo");
14//! hasher.update(b"bar");
15//! hasher.update(b"baz");
16//! let hash2 = hasher.finalize();
17//! assert_eq!(hash1, hash2);
18//!
19//! // Extended output. OutputReader also implements Read and Seek.
20//! # #[cfg(feature = "std")] {
21//! let mut output = [0; 1000];
22//! let mut output_reader = hasher.finalize_xof();
23//! output_reader.fill(&mut output);
24//! assert_eq!(hash1, output[..32]);
25//! # }
26//!
27//! // Print a hash as hex.
28//! println!("{}", hash1);
29//! # Ok(())
30//! # }
31//! ```
32//!
33//! # Cargo Features
34//!
35//! The `std` feature (the only feature enabled by default) enables the
36//! [`Write`] implementation and the [`update_reader`](Hasher::update_reader)
37//! method for [`Hasher`], and also the [`Read`] and [`Seek`] implementations
38//! for [`OutputReader`].
39//!
40//! The `rayon` feature (disabled by default, but enabled for [docs.rs]) adds
41//! the [`update_rayon`](Hasher::update_rayon) and (in combination with `mmap`
42//! below) [`update_mmap_rayon`](Hasher::update_mmap_rayon) methods for
43//! multithreaded hashing. However, even if this feature is enabled, all other
44//! APIs remain single-threaded.
45//!
46//! The `mmap` feature (disabled by default, but enabled for [docs.rs]) adds the
47//! [`update_mmap`](Hasher::update_mmap) and (in combination with `rayon` above)
48//! [`update_mmap_rayon`](Hasher::update_mmap_rayon) helper methods for
49//! memory-mapped IO.
50//!
51//! The `zeroize` feature (disabled by default, but enabled for [docs.rs])
52//! implements
53//! [`Zeroize`](https://docs.rs/zeroize/latest/zeroize/trait.Zeroize.html) for
54//! this crate's types.
55//!
56//! The `serde` feature (disabled by default, but enabled for [docs.rs]) implements
57//! [`serde::Serialize`](https://docs.rs/serde/latest/serde/trait.Serialize.html) and
58//! [`serde::Deserialize`](https://docs.rs/serde/latest/serde/trait.Deserialize.html)
59//! for [`Hash`](struct@Hash).
60//!
61//! The NEON implementation is enabled by default for AArch64 but requires the
62//! `neon` feature for other ARM targets. Not all ARMv7 CPUs support NEON, and
63//! enabling this feature will produce a binary that's not portable to CPUs
64//! without NEON support.
65//!
66//! The `wasm32_simd` feature enables the WASM SIMD implementation for all `wasm32-`
67//! targets. Similar to the `neon` feature, if `wasm32_simd` is enabled, WASM SIMD
68//! support is assumed. This may become the default in the future.
69//!
70//! The `traits-preview` feature enables implementations of traits from the
71//! RustCrypto [`digest`] crate, and re-exports that crate as `traits::digest`.
72//! However, the traits aren't stable, and they're expected to change in
73//! incompatible ways before that crate reaches 1.0. For that reason, this crate
74//! makes no SemVer guarantees for this feature, and callers who use it should
75//! expect breaking changes between patch versions. (The "-preview" feature name
76//! follows the conventions of the RustCrypto [`signature`] crate.)
77//!
78//! [`Hasher::update_rayon`]: struct.Hasher.html#method.update_rayon
79//! [BLAKE3]: https://blake3.io
80//! [Rayon]: https://github.com/rayon-rs/rayon
81//! [docs.rs]: https://docs.rs/
82//! [`Read`]: https://doc.rust-lang.org/std/io/trait.Read.html
83//! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
84//! [`Seek`]: https://doc.rust-lang.org/std/io/trait.Seek.html
85//! [`digest`]: https://crates.io/crates/digest
86//! [`signature`]: https://crates.io/crates/signature
87
88#![cfg_attr(not(feature = "std"), no_std)]
89
90#[cfg(test)]
91mod test;
92
93#[doc(hidden)]
94#[deprecated(since = "1.8.0", note = "use the hazmat module instead")]
95pub mod guts;
96
97pub mod hazmat;
98
99/// Undocumented and unstable, for benchmarks only.
100#[doc(hidden)]
101pub mod platform;
102
103// Platform-specific implementations of the compression function. These
104// BLAKE3-specific cfg flags are set in build.rs.
105#[cfg(blake3_avx2_rust)]
106#[path = "rust_avx2.rs"]
107mod avx2;
108#[cfg(blake3_avx2_ffi)]
109#[path = "ffi_avx2.rs"]
110mod avx2;
111#[cfg(blake3_avx512_ffi)]
112#[path = "ffi_avx512.rs"]
113mod avx512;
114#[cfg(blake3_neon)]
115#[path = "ffi_neon.rs"]
116mod neon;
117mod portable;
118#[cfg(blake3_sse2_rust)]
119#[path = "rust_sse2.rs"]
120mod sse2;
121#[cfg(blake3_sse2_ffi)]
122#[path = "ffi_sse2.rs"]
123mod sse2;
124#[cfg(blake3_sse41_rust)]
125#[path = "rust_sse41.rs"]
126mod sse41;
127#[cfg(blake3_sse41_ffi)]
128#[path = "ffi_sse41.rs"]
129mod sse41;
130
131#[cfg(blake3_wasm32_simd)]
132#[path = "wasm32_simd.rs"]
133mod wasm32_simd;
134
135#[cfg(feature = "traits-preview")]
136pub mod traits;
137
138mod io;
139mod join;
140
141use arrayref::{array_mut_ref, array_ref};
142use arrayvec::{ArrayString, ArrayVec};
143use core::cmp;
144use core::fmt;
145use platform::{Platform, MAX_SIMD_DEGREE, MAX_SIMD_DEGREE_OR_2};
146#[cfg(feature = "zeroize")]
147use zeroize::Zeroize;
148
149/// The number of bytes in a [`Hash`](struct.Hash.html), 32.
150pub const OUT_LEN: usize = 32;
151
152/// The number of bytes in a key, 32.
153pub const KEY_LEN: usize = 32;
154
155/// The number of bytes in a block, 64.
156///
157/// You don't usually need to think about this number. One case where it matters is calling
158/// [`OutputReader::fill`] in a loop, where using a `buf` argument that's a multiple of `BLOCK_LEN`
159/// avoids repeating work.
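///
/// A minimal sketch of that pattern, reading extended output in `BLOCK_LEN`-sized steps
/// (the input and loop count here are only illustrative):
///
/// ```
/// let mut reader = blake3::Hasher::new().update(b"input").finalize_xof();
/// let mut buf = [0u8; blake3::BLOCK_LEN]; // a multiple of BLOCK_LEN
/// for _ in 0..4 {
///     reader.fill(&mut buf);
///     // use `buf` ...
/// }
/// ```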
160pub const BLOCK_LEN: usize = 64;
161
162/// The number of bytes in a chunk, 1024.
163///
164/// You don't usually need to think about this number, but it often comes up in benchmarks, because
165/// the maximum degree of parallelism used by the implementation equals the number of chunks.
166pub const CHUNK_LEN: usize = 1024;
167
168const MAX_DEPTH: usize = 54; // 2^54 * CHUNK_LEN = 2^64
169
170// While iterating the compression function within a chunk, the CV is
171// represented as words, to avoid doing two extra endianness conversions for
172// each compression in the portable implementation. But the hash_many interface
173// needs to hash both input bytes and parent nodes, so it's better for its
174// output CVs to be represented as bytes.
175type CVWords = [u32; 8];
176type CVBytes = [u8; 32]; // little-endian
177
178const IV: &CVWords = &[
179    0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
180];
181
182const MSG_SCHEDULE: [[usize; 16]; 7] = [
183    [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
184    [2, 6, 3, 10, 7, 0, 4, 13, 1, 11, 12, 5, 9, 14, 15, 8],
185    [3, 4, 10, 12, 13, 2, 7, 14, 6, 5, 9, 0, 11, 15, 8, 1],
186    [10, 7, 12, 9, 14, 3, 13, 15, 4, 0, 11, 2, 5, 8, 1, 6],
187    [12, 13, 9, 11, 15, 10, 14, 8, 7, 2, 5, 3, 0, 1, 6, 4],
188    [9, 14, 11, 5, 8, 12, 15, 1, 13, 3, 0, 10, 2, 6, 4, 7],
189    [11, 15, 5, 0, 1, 9, 8, 6, 14, 10, 2, 12, 3, 4, 7, 13],
190];
191
192// These are the internal flags that we use to domain separate root/non-root,
193// chunk/parent, and chunk beginning/middle/end. They are set as distinct bits
194// in the block flags word of the compression function, so their values are
195// small powers of two.
196const CHUNK_START: u8 = 1 << 0;
197const CHUNK_END: u8 = 1 << 1;
198const PARENT: u8 = 1 << 2;
199const ROOT: u8 = 1 << 3;
200const KEYED_HASH: u8 = 1 << 4;
201const DERIVE_KEY_CONTEXT: u8 = 1 << 5;
202const DERIVE_KEY_MATERIAL: u8 = 1 << 6;
203
204#[inline]
205fn counter_low(counter: u64) -> u32 {
206    counter as u32
207}
208
209#[inline]
210fn counter_high(counter: u64) -> u32 {
211    (counter >> 32) as u32
212}
213
214/// An output of the default size, 32 bytes, which provides constant-time
215/// equality checking.
216///
217/// `Hash` implements [`From`] and [`Into`] for `[u8; 32]`, and it provides
218/// [`from_bytes`] and [`as_bytes`] for explicit conversions between itself and
219/// `[u8; 32]`. However, byte arrays and slices don't provide constant-time
220/// equality checking, which is often a security requirement in software that
221/// handles private data. `Hash` doesn't implement [`Deref`] or [`AsRef`], to
222/// avoid situations where a type conversion happens implicitly and the
223/// constant-time property is accidentally lost.
224///
225/// `Hash` provides the [`to_hex`] and [`from_hex`] methods for converting to
226/// and from hexadecimal. It also implements [`Display`] and [`FromStr`].
227///
228/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
229/// [`Into`]: https://doc.rust-lang.org/std/convert/trait.Into.html
230/// [`as_bytes`]: #method.as_bytes
231/// [`from_bytes`]: #method.from_bytes
232/// [`Deref`]: https://doc.rust-lang.org/stable/std/ops/trait.Deref.html
233/// [`AsRef`]: https://doc.rust-lang.org/std/convert/trait.AsRef.html
234/// [`to_hex`]: #method.to_hex
235/// [`from_hex`]: #method.from_hex
236/// [`Display`]: https://doc.rust-lang.org/std/fmt/trait.Display.html
237/// [`FromStr`]: https://doc.rust-lang.org/std/str/trait.FromStr.html
238#[cfg_attr(feature = "serde", derive(serde::Deserialize, serde::Serialize))]
239#[derive(Clone, Copy, Hash, Eq)]
240pub struct Hash([u8; OUT_LEN]);
241
242impl Hash {
243    /// The raw bytes of the `Hash`. Note that byte arrays don't provide
244    /// constant-time equality checking, so if you need to compare hashes,
245    /// prefer the `Hash` type.
246    #[inline]
247    pub const fn as_bytes(&self) -> &[u8; OUT_LEN] {
248        &self.0
249    }
250
251    /// Create a `Hash` from its raw bytes representation.
252    pub const fn from_bytes(bytes: [u8; OUT_LEN]) -> Self {
253        Self(bytes)
254    }
255
256    /// The raw bytes of the `Hash`, as a slice. Useful for serialization. Note that byte arrays
257    /// don't provide constant-time equality checking, so if you need to compare hashes, prefer
258    /// the `Hash` type.
259    #[inline]
260    pub const fn as_slice(&self) -> &[u8] {
261        self.0.as_slice()
262    }
263
264    /// Create a `Hash` from its raw bytes representation as a slice.
265    ///
266    /// Returns an error if the slice is not exactly 32 bytes long.
267    pub fn from_slice(bytes: &[u8]) -> Result<Self, core::array::TryFromSliceError> {
268        Ok(Self::from_bytes(bytes.try_into()?))
269    }
270
271    /// Encode a `Hash` in lowercase hexadecimal.
272    ///
273    /// The returned [`ArrayString`] is a fixed size and doesn't allocate memory
274    /// on the heap. Note that [`ArrayString`] doesn't provide constant-time
275    /// equality checking, so if you need to compare hashes, prefer the `Hash`
276    /// type.
277    ///
278    /// [`ArrayString`]: https://docs.rs/arrayvec/latest/arrayvec/struct.ArrayString.html
279    pub fn to_hex(&self) -> ArrayString<{ 2 * OUT_LEN }> {
280        let mut s = ArrayString::new();
281        let table = b"0123456789abcdef";
282        for &b in self.0.iter() {
283            s.push(table[(b >> 4) as usize] as char);
284            s.push(table[(b & 0xf) as usize] as char);
285        }
286        s
287    }
288
289    /// Decode a `Hash` from hexadecimal. Both uppercase and lowercase ASCII
290    /// bytes are supported.
291    ///
292    /// Any byte outside the ranges `'0'...'9'`, `'a'...'f'`, and `'A'...'F'`
293    /// results in an error. An input length other than 64 also results in an
294    /// error.
295    ///
296    /// Note that `Hash` also implements `FromStr`, so `Hash::from_hex("...")`
297    /// is equivalent to `"...".parse()`.
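    ///
    /// A minimal sketch of the success and error cases:
    ///
    /// ```
    /// let hex = blake3::hash(b"foo").to_hex();
    /// assert!(blake3::Hash::from_hex(hex.as_str()).is_ok());
    /// assert!(blake3::Hash::from_hex("not hex").is_err());
    /// ```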
298    pub fn from_hex(hex: impl AsRef<[u8]>) -> Result<Self, HexError> {
299        fn hex_val(byte: u8) -> Result<u8, HexError> {
300            match byte {
301                b'A'..=b'F' => Ok(byte - b'A' + 10),
302                b'a'..=b'f' => Ok(byte - b'a' + 10),
303                b'0'..=b'9' => Ok(byte - b'0'),
304                _ => Err(HexError(HexErrorInner::InvalidByte(byte))),
305            }
306        }
307        let hex_bytes: &[u8] = hex.as_ref();
308        if hex_bytes.len() != OUT_LEN * 2 {
309            return Err(HexError(HexErrorInner::InvalidLen(hex_bytes.len())));
310        }
311        let mut hash_bytes: [u8; OUT_LEN] = [0; OUT_LEN];
312        for i in 0..OUT_LEN {
313            hash_bytes[i] = 16 * hex_val(hex_bytes[2 * i])? + hex_val(hex_bytes[2 * i + 1])?;
314        }
315        Ok(Hash::from(hash_bytes))
316    }
317}
318
319impl From<[u8; OUT_LEN]> for Hash {
320    #[inline]
321    fn from(bytes: [u8; OUT_LEN]) -> Self {
322        Self::from_bytes(bytes)
323    }
324}
325
326impl From<Hash> for [u8; OUT_LEN] {
327    #[inline]
328    fn from(hash: Hash) -> Self {
329        hash.0
330    }
331}
332
333impl core::str::FromStr for Hash {
334    type Err = HexError;
335
336    fn from_str(s: &str) -> Result<Self, Self::Err> {
337        Hash::from_hex(s)
338    }
339}
340
341#[cfg(feature = "zeroize")]
342impl Zeroize for Hash {
343    fn zeroize(&mut self) {
344        // Destructuring to trigger compile error as a reminder to update this impl.
345        let Self(bytes) = self;
346        bytes.zeroize();
347    }
348}
349
350/// This implementation is constant-time.
351impl PartialEq for Hash {
352    #[inline]
353    fn eq(&self, other: &Hash) -> bool {
354        constant_time_eq::constant_time_eq_32(&self.0, &other.0)
355    }
356}
357
358/// This implementation is constant-time.
359impl PartialEq<[u8; OUT_LEN]> for Hash {
360    #[inline]
361    fn eq(&self, other: &[u8; OUT_LEN]) -> bool {
362        constant_time_eq::constant_time_eq_32(&self.0, other)
363    }
364}
365
366/// This implementation is constant-time if the target is 32 bytes long.
367impl PartialEq<[u8]> for Hash {
368    #[inline]
369    fn eq(&self, other: &[u8]) -> bool {
370        constant_time_eq::constant_time_eq(&self.0, other)
371    }
372}
373
374impl fmt::Display for Hash {
375    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
376        // Formatting field as `&str` to reduce code size since the `Debug`
377        // dynamic dispatch table for `&str` is likely needed elsewhere already,
378        // but that for `ArrayString<64>` is not.
379        let hex = self.to_hex();
380        let hex: &str = hex.as_str();
381
382        f.write_str(hex)
383    }
384}
385
386impl fmt::Debug for Hash {
387    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
388        // Formatting field as `&str` to reduce code size since the `Debug`
389        // dynamic dispatch table for `&str` is likely needed elsewhere already,
390        // but that for `ArrayString<64>` is not.
391        let hex = self.to_hex();
392        let hex: &str = hex.as_str();
393
394        f.debug_tuple("Hash").field(&hex).finish()
395    }
396}
397
398/// The error type for [`Hash::from_hex`].
399///
400/// The `.to_string()` representation of this error currently distinguishes between bad length
401/// errors and bad character errors. This is to help with logging and debugging, but it isn't a
402/// stable API detail, and it may change at any time.
403#[derive(Clone, Debug)]
404pub struct HexError(HexErrorInner);
405
406#[derive(Clone, Debug)]
407enum HexErrorInner {
408    InvalidByte(u8),
409    InvalidLen(usize),
410}
411
412impl fmt::Display for HexError {
413    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
414        match self.0 {
415            HexErrorInner::InvalidByte(byte) => {
416                if byte < 128 {
417                    write!(f, "invalid hex character: {:?}", byte as char)
418                } else {
419                    write!(f, "invalid hex character: 0x{:x}", byte)
420                }
421            }
422            HexErrorInner::InvalidLen(len) => {
423                write!(f, "expected 64 hex bytes, received {}", len)
424            }
425        }
426    }
427}
428
429#[cfg(feature = "std")]
430impl std::error::Error for HexError {}
431
432// Each chunk or parent node can produce either a 32-byte chaining value or, by
433// setting the ROOT flag, any number of final output bytes. The Output struct
434// captures the state just prior to choosing between those two possibilities.
435#[derive(Clone)]
436struct Output {
437    input_chaining_value: CVWords,
438    block: [u8; 64],
439    block_len: u8,
440    counter: u64,
441    flags: u8,
442    platform: Platform,
443}
444
445impl Output {
446    fn chaining_value(&self) -> CVBytes {
447        let mut cv = self.input_chaining_value;
448        self.platform.compress_in_place(
449            &mut cv,
450            &self.block,
451            self.block_len,
452            self.counter,
453            self.flags,
454        );
455        platform::le_bytes_from_words_32(&cv)
456    }
457
458    fn root_hash(&self) -> Hash {
459        debug_assert_eq!(self.counter, 0);
460        let mut cv = self.input_chaining_value;
461        self.platform
462            .compress_in_place(&mut cv, &self.block, self.block_len, 0, self.flags | ROOT);
463        Hash(platform::le_bytes_from_words_32(&cv))
464    }
465
466    fn root_output_block(&self) -> [u8; 2 * OUT_LEN] {
467        self.platform.compress_xof(
468            &self.input_chaining_value,
469            &self.block,
470            self.block_len,
471            self.counter,
472            self.flags | ROOT,
473        )
474    }
475}
476
477#[cfg(feature = "zeroize")]
478impl Zeroize for Output {
479    fn zeroize(&mut self) {
480        // Destructuring to trigger compile error as a reminder to update this impl.
481        let Self {
482            input_chaining_value,
483            block,
484            block_len,
485            counter,
486            flags,
487            platform: _,
488        } = self;
489
490        input_chaining_value.zeroize();
491        block.zeroize();
492        block_len.zeroize();
493        counter.zeroize();
494        flags.zeroize();
495    }
496}
497
498#[derive(Clone)]
499struct ChunkState {
500    cv: CVWords,
501    chunk_counter: u64,
502    buf: [u8; BLOCK_LEN],
503    buf_len: u8,
504    blocks_compressed: u8,
505    flags: u8,
506    platform: Platform,
507}
508
509impl ChunkState {
510    fn new(key: &CVWords, chunk_counter: u64, flags: u8, platform: Platform) -> Self {
511        Self {
512            cv: *key,
513            chunk_counter,
514            buf: [0; BLOCK_LEN],
515            buf_len: 0,
516            blocks_compressed: 0,
517            flags,
518            platform,
519        }
520    }
521
522    fn count(&self) -> usize {
523        BLOCK_LEN * self.blocks_compressed as usize + self.buf_len as usize
524    }
525
526    fn fill_buf(&mut self, input: &mut &[u8]) {
527        let want = BLOCK_LEN - self.buf_len as usize;
528        let take = cmp::min(want, input.len());
529        self.buf[self.buf_len as usize..][..take].copy_from_slice(&input[..take]);
530        self.buf_len += take as u8;
531        *input = &input[take..];
532    }
533
534    fn start_flag(&self) -> u8 {
535        if self.blocks_compressed == 0 {
536            CHUNK_START
537        } else {
538            0
539        }
540    }
541
542    // Try to avoid buffering as much as possible, by compressing directly from
543    // the input slice when full blocks are available.
544    fn update(&mut self, mut input: &[u8]) -> &mut Self {
545        if self.buf_len > 0 {
546            self.fill_buf(&mut input);
547            if !input.is_empty() {
548                debug_assert_eq!(self.buf_len as usize, BLOCK_LEN);
549                let block_flags = self.flags | self.start_flag(); // borrowck
550                self.platform.compress_in_place(
551                    &mut self.cv,
552                    &self.buf,
553                    BLOCK_LEN as u8,
554                    self.chunk_counter,
555                    block_flags,
556                );
557                self.buf_len = 0;
558                self.buf = [0; BLOCK_LEN];
559                self.blocks_compressed += 1;
560            }
561        }
562
563        while input.len() > BLOCK_LEN {
564            debug_assert_eq!(self.buf_len, 0);
565            let block_flags = self.flags | self.start_flag(); // borrowck
566            self.platform.compress_in_place(
567                &mut self.cv,
568                array_ref!(input, 0, BLOCK_LEN),
569                BLOCK_LEN as u8,
570                self.chunk_counter,
571                block_flags,
572            );
573            self.blocks_compressed += 1;
574            input = &input[BLOCK_LEN..];
575        }
576
577        self.fill_buf(&mut input);
578        debug_assert!(input.is_empty());
579        debug_assert!(self.count() <= CHUNK_LEN);
580        self
581    }
582
583    fn output(&self) -> Output {
584        let block_flags = self.flags | self.start_flag() | CHUNK_END;
585        Output {
586            input_chaining_value: self.cv,
587            block: self.buf,
588            block_len: self.buf_len,
589            counter: self.chunk_counter,
590            flags: block_flags,
591            platform: self.platform,
592        }
593    }
594}
595
596// Don't derive(Debug), because the state may be secret.
597impl fmt::Debug for ChunkState {
598    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
599        f.debug_struct("ChunkState")
600            .field("count", &self.count())
601            .field("chunk_counter", &self.chunk_counter)
602            .field("flags", &self.flags)
603            .field("platform", &self.platform)
604            .finish()
605    }
606}
607
608#[cfg(feature = "zeroize")]
609impl Zeroize for ChunkState {
610    fn zeroize(&mut self) {
611        // Destructuring to trigger compile error as a reminder to update this impl.
612        let Self {
613            cv,
614            chunk_counter,
615            buf,
616            buf_len,
617            blocks_compressed,
618            flags,
619            platform: _,
620        } = self;
621
622        cv.zeroize();
623        chunk_counter.zeroize();
624        buf.zeroize();
625        buf_len.zeroize();
626        blocks_compressed.zeroize();
627        flags.zeroize();
628    }
629}
630
631// IMPLEMENTATION NOTE
632// ===================
633// The recursive function compress_subtree_wide(), implemented below, is the
634// basis of high-performance BLAKE3. We use it both for all-at-once hashing,
635// and for the incremental input with Hasher (though we have to be careful with
636// subtree boundaries in the incremental case). compress_subtree_wide() applies
637// several optimizations at the same time:
638// - Multithreading with Rayon.
639// - Parallel chunk hashing with SIMD.
640// - Parallel parent hashing with SIMD. Note that while SIMD chunk hashing
641//   maxes out at MAX_SIMD_DEGREE*CHUNK_LEN, parallel parent hashing continues
642//   to benefit from larger inputs, because more levels of the tree can
643//   use full-width SIMD vectors for parent hashing. Without parallel parent
644//   hashing, we lose about 10% of overall throughput on AVX2 and AVX-512.
645
646/// Undocumented and unstable, for benchmarks only.
647#[doc(hidden)]
648#[derive(Clone, Copy)]
649pub enum IncrementCounter {
650    Yes,
651    No,
652}
653
654impl IncrementCounter {
655    #[inline]
656    fn yes(&self) -> bool {
657        match self {
658            IncrementCounter::Yes => true,
659            IncrementCounter::No => false,
660        }
661    }
662}
663
664// The largest power of two less than or equal to `n`, used in Hasher::update(). This is similar to
665// left_subtree_len(n), but note that left_subtree_len(n) is strictly less than `n`.
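// A few example values (a quick sanity sketch, matching the formula below):
//
//     largest_power_of_two_leq(1) == 1
//     largest_power_of_two_leq(6) == 4
//     largest_power_of_two_leq(8) == 8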
666fn largest_power_of_two_leq(n: usize) -> usize {
667    ((n / 2) + 1).next_power_of_two()
668}
669
670// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
671// on a single thread. Write out the chunk chaining values and return the
672// number of chunks hashed. These chunks are never the root and never empty;
673// those cases use a different codepath.
674fn compress_chunks_parallel(
675    input: &[u8],
676    key: &CVWords,
677    chunk_counter: u64,
678    flags: u8,
679    platform: Platform,
680    out: &mut [u8],
681) -> usize {
682    debug_assert!(!input.is_empty(), "empty chunks below the root");
683    debug_assert!(input.len() <= MAX_SIMD_DEGREE * CHUNK_LEN);
684
685    let mut chunks_exact = input.chunks_exact(CHUNK_LEN);
686    let mut chunks_array = ArrayVec::<&[u8; CHUNK_LEN], MAX_SIMD_DEGREE>::new();
687    for chunk in &mut chunks_exact {
688        chunks_array.push(array_ref!(chunk, 0, CHUNK_LEN));
689    }
690    platform.hash_many(
691        &chunks_array,
692        key,
693        chunk_counter,
694        IncrementCounter::Yes,
695        flags,
696        CHUNK_START,
697        CHUNK_END,
698        out,
699    );
700
701    // Hash the remaining partial chunk, if there is one. Note that the empty
702    // chunk (meaning the empty message) is a different codepath.
703    let chunks_so_far = chunks_array.len();
704    if !chunks_exact.remainder().is_empty() {
705        let counter = chunk_counter + chunks_so_far as u64;
706        let mut chunk_state = ChunkState::new(key, counter, flags, platform);
707        chunk_state.update(chunks_exact.remainder());
708        *array_mut_ref!(out, chunks_so_far * OUT_LEN, OUT_LEN) =
709            chunk_state.output().chaining_value();
710        chunks_so_far + 1
711    } else {
712        chunks_so_far
713    }
714}
715
716// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
717// on a single thread. Write out the parent chaining values and return the
718// number of parents hashed. (If there's an odd input chaining value left over,
719// return it as an additional output.) These parents are never the root and
720// never empty; those cases use a different codepath.
721fn compress_parents_parallel(
722    child_chaining_values: &[u8],
723    key: &CVWords,
724    flags: u8,
725    platform: Platform,
726    out: &mut [u8],
727) -> usize {
728    debug_assert_eq!(child_chaining_values.len() % OUT_LEN, 0, "wacky hash bytes");
729    let num_children = child_chaining_values.len() / OUT_LEN;
730    debug_assert!(num_children >= 2, "not enough children");
731    debug_assert!(num_children <= 2 * MAX_SIMD_DEGREE_OR_2, "too many");
732
733    let mut parents_exact = child_chaining_values.chunks_exact(BLOCK_LEN);
734    // Use MAX_SIMD_DEGREE_OR_2 rather than MAX_SIMD_DEGREE here, because of
735    // the requirements of compress_subtree_wide().
736    let mut parents_array = ArrayVec::<&[u8; BLOCK_LEN], MAX_SIMD_DEGREE_OR_2>::new();
737    for parent in &mut parents_exact {
738        parents_array.push(array_ref!(parent, 0, BLOCK_LEN));
739    }
740    platform.hash_many(
741        &parents_array,
742        key,
743        0, // Parents always use counter 0.
744        IncrementCounter::No,
745        flags | PARENT,
746        0, // Parents have no start flags.
747        0, // Parents have no end flags.
748        out,
749    );
750
751    // If there's an odd child left over, it becomes an output.
752    let parents_so_far = parents_array.len();
753    if !parents_exact.remainder().is_empty() {
754        out[parents_so_far * OUT_LEN..][..OUT_LEN].copy_from_slice(parents_exact.remainder());
755        parents_so_far + 1
756    } else {
757        parents_so_far
758    }
759}
760
761// The wide helper function returns (writes out) an array of chaining values
762// and returns the length of that array. The number of chaining values returned
763// is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
764// if the input is shorter than that many chunks. The reason for maintaining a
765// wide array of chaining values going back up the tree is to allow the
766// implementation to hash as many parents in parallel as possible.
767//
768// As a special case when the SIMD degree is 1, this function will still return
769// at least 2 outputs. This guarantees that this function doesn't perform the
770// root compression. (If it did, it would use the wrong flags, and also we
771// wouldn't be able to implement extendable output.) Note that this function is
772// not used when the whole input is only 1 chunk long; that's a different
773// codepath.
774//
775// Why not just have the caller split the input on the first update(), instead
776// of implementing this special rule? Because we don't want to limit SIMD or
777// multithreading parallelism for that update().
778fn compress_subtree_wide<J: join::Join>(
779    input: &[u8],
780    key: &CVWords,
781    chunk_counter: u64,
782    flags: u8,
783    platform: Platform,
784    out: &mut [u8],
785) -> usize {
786    // Note that the single chunk case does *not* bump the SIMD degree up to 2
787    // when it is 1. This allows Rayon the option of multithreading even the
788    // 2-chunk case, which can help performance on smaller platforms.
789    if input.len() <= platform.simd_degree() * CHUNK_LEN {
790        return compress_chunks_parallel(input, key, chunk_counter, flags, platform, out);
791    }
792
793    // With more than simd_degree chunks, we need to recurse. Start by dividing
794    // the input into left and right subtrees. (Note that this is only optimal
795    // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
796    // of 3 or something, we'll need a more complicated strategy.)
797    debug_assert_eq!(platform.simd_degree().count_ones(), 1, "power of 2");
798    let (left, right) = input.split_at(hazmat::left_subtree_len(input.len() as u64) as usize);
799    let right_chunk_counter = chunk_counter + (left.len() / CHUNK_LEN) as u64;
800
801    // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
802    // account for the special case of returning 2 outputs when the SIMD degree
803    // is 1.
804    let mut cv_array = [0; 2 * MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
805    let degree = if left.len() == CHUNK_LEN {
806        // The "simd_degree=1 and we're at the leaf nodes" case.
807        debug_assert_eq!(platform.simd_degree(), 1);
808        1
809    } else {
810        cmp::max(platform.simd_degree(), 2)
811    };
812    let (left_out, right_out) = cv_array.split_at_mut(degree * OUT_LEN);
813
814    // Recurse! For update_rayon(), this is where we take advantage of RayonJoin and use multiple
815    // threads.
816    let (left_n, right_n) = J::join(
817        || compress_subtree_wide::<J>(left, key, chunk_counter, flags, platform, left_out),
818        || compress_subtree_wide::<J>(right, key, right_chunk_counter, flags, platform, right_out),
819    );
820
821    // The special case again. If simd_degree=1, then we'll have left_n=1 and
822    // right_n=1. Rather than compressing them into a single output, return
823    // them directly, to make sure we always have at least two outputs.
824    debug_assert_eq!(left_n, degree);
825    debug_assert!(right_n >= 1 && right_n <= left_n);
826    if left_n == 1 {
827        out[..2 * OUT_LEN].copy_from_slice(&cv_array[..2 * OUT_LEN]);
828        return 2;
829    }
830
831    // Otherwise, do one layer of parent node compression.
832    let num_children = left_n + right_n;
833    compress_parents_parallel(
834        &cv_array[..num_children * OUT_LEN],
835        key,
836        flags,
837        platform,
838        out,
839    )
840}
841
842// Hash a subtree with compress_subtree_wide(), and then condense the resulting
843// list of chaining values down to a single parent node. Don't compress that
844// last parent node, however. Instead, return its message bytes (the
845// concatenated chaining values of its children). This is necessary when the
846// first call to update() supplies a complete subtree, because the topmost
847// parent node of that subtree could end up being the root. It's also necessary
848// for extended output in the general case.
849//
850// As with compress_subtree_wide(), this function is not used on inputs of 1
851// chunk or less. That's a different codepath.
852fn compress_subtree_to_parent_node<J: join::Join>(
853    input: &[u8],
854    key: &CVWords,
855    chunk_counter: u64,
856    flags: u8,
857    platform: Platform,
858) -> [u8; BLOCK_LEN] {
859    debug_assert!(input.len() > CHUNK_LEN);
860    let mut cv_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN];
861    let mut num_cvs =
862        compress_subtree_wide::<J>(input, &key, chunk_counter, flags, platform, &mut cv_array);
863    debug_assert!(num_cvs >= 2);
864
865    // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
866    // compress_subtree_wide() returns more than 2 chaining values. Condense
867    // them into 2 by forming parent nodes repeatedly.
868    let mut out_array = [0; MAX_SIMD_DEGREE_OR_2 * OUT_LEN / 2];
869    while num_cvs > 2 {
870        let cv_slice = &cv_array[..num_cvs * OUT_LEN];
871        num_cvs = compress_parents_parallel(cv_slice, key, flags, platform, &mut out_array);
872        cv_array[..num_cvs * OUT_LEN].copy_from_slice(&out_array[..num_cvs * OUT_LEN]);
873    }
874    *array_ref!(cv_array, 0, 2 * OUT_LEN)
875}
876
877// Hash a complete input all at once. Unlike compress_subtree_wide() and
878// compress_subtree_to_parent_node(), this function handles the 1 chunk case.
879fn hash_all_at_once<J: join::Join>(input: &[u8], key: &CVWords, flags: u8) -> Output {
880    let platform = Platform::detect();
881
882    // If the whole subtree is one chunk, hash it directly with a ChunkState.
883    if input.len() <= CHUNK_LEN {
884        return ChunkState::new(key, 0, flags, platform)
885            .update(input)
886            .output();
887    }
888
889    // Otherwise construct an Output object from the parent node returned by
890    // compress_subtree_to_parent_node().
891    Output {
892        input_chaining_value: *key,
893        block: compress_subtree_to_parent_node::<J>(input, key, 0, flags, platform),
894        block_len: BLOCK_LEN as u8,
895        counter: 0,
896        flags: flags | PARENT,
897        platform,
898    }
899}
900
901/// The default hash function.
902///
903/// For an incremental version that accepts multiple writes, see [`Hasher::new`],
904/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
905///
906/// ```
907/// let hash = blake3::hash(b"foo");
908/// # let hash1 = hash;
909///
910/// let hash = blake3::Hasher::new().update(b"foo").finalize();
911/// # let hash2 = hash;
912/// # assert_eq!(hash1, hash2);
913/// ```
914///
915/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`] and
916/// [`OutputReader`].
917///
918/// This function is always single-threaded. For multithreading support, see
919/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
920pub fn hash(input: &[u8]) -> Hash {
921    hash_all_at_once::<join::SerialJoin>(input, IV, 0).root_hash()
922}
923
924/// The keyed hash function.
925///
926/// This is suitable for use as a message authentication code, for example to
927/// replace an HMAC instance. In that use case, the constant-time equality
928/// checking provided by [`Hash`](struct.Hash.html) is almost always a security
929/// requirement, and callers need to be careful not to compare MACs as raw
930/// bytes.
931///
932/// For an incremental version that accepts multiple writes, see [`Hasher::new_keyed`],
933/// [`Hasher::update`], and [`Hasher::finalize`]. These two lines are equivalent:
934///
935/// ```
936/// # const KEY: &[u8; 32] = &[0; 32];
937/// let mac = blake3::keyed_hash(KEY, b"foo");
938/// # let mac1 = mac;
939///
940/// let mac = blake3::Hasher::new_keyed(KEY).update(b"foo").finalize();
941/// # let mac2 = mac;
942/// # assert_eq!(mac1, mac2);
943/// ```
944///
945/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
946///
947/// This function is always single-threaded. For multithreading support, see
948/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
949pub fn keyed_hash(key: &[u8; KEY_LEN], input: &[u8]) -> Hash {
950    let key_words = platform::words_from_le_bytes_32(key);
951    hash_all_at_once::<join::SerialJoin>(input, &key_words, KEYED_HASH).root_hash()
952}
953
954/// The key derivation function.
955///
956/// Given cryptographic key material of any length and a context string of any
957/// length, this function outputs a 32-byte derived subkey. **The context string
958/// should be hardcoded, globally unique, and application-specific.** A good
959/// default format for such strings is `"[application] [commit timestamp]
960/// [purpose]"`, e.g., `"example.com 2019-12-25 16:18:03 session tokens v1"`.
961///
962/// Key derivation is important when you want to use the same key in multiple
963/// algorithms or use cases. Using the same key with different cryptographic
964/// algorithms is generally forbidden, and deriving a separate subkey for each
965/// use case protects you from bad interactions. Derived keys also mitigate the
966/// damage from one part of your application accidentally leaking its key.
967///
968/// As a rare exception to that general rule, however, it is possible to use
969/// `derive_key` itself with key material that you are already using with
970/// another algorithm. You might need to do this if you're adding features to
971/// an existing application, which does not yet use key derivation internally.
972/// However, you still must not share key material with algorithms that forbid
973/// key reuse entirely, like a one-time pad. For more on this, see sections 6.2
974/// and 7.8 of the [BLAKE3 paper](https://github.com/BLAKE3-team/BLAKE3-specs/blob/master/blake3.pdf).
975///
976/// Note that BLAKE3 is not a password hash, and **`derive_key` should never be
977/// used with passwords.** Instead, use a dedicated password hash like
978/// [Argon2]. Password hashes are entirely different from generic hash
979/// functions, with opposite design requirements.
980///
981/// For an incremental version that accepts multiple writes, see [`Hasher::new_derive_key`],
982/// [`Hasher::update`], and [`Hasher::finalize`]. These two statements are equivalent:
983///
984/// ```
985/// # const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
986/// let key = blake3::derive_key(CONTEXT, b"key material, not a password");
987/// # let key1 = key;
988///
989/// let key: [u8; 32] = blake3::Hasher::new_derive_key(CONTEXT)
990///     .update(b"key material, not a password")
991///     .finalize()
992///     .into();
993/// # let key2 = key;
994/// # assert_eq!(key1, key2);
995/// ```
996///
997/// For output sizes other than 32 bytes, see [`Hasher::finalize_xof`], and [`OutputReader`].
998///
999/// This function is always single-threaded. For multithreading support, see
1000/// [`Hasher::update_rayon`](struct.Hasher.html#method.update_rayon).
1001///
1002/// [Argon2]: https://en.wikipedia.org/wiki/Argon2
1003pub fn derive_key(context: &str, key_material: &[u8]) -> [u8; OUT_LEN] {
1004    let context_key = hazmat::hash_derive_key_context(context);
1005    let context_key_words = platform::words_from_le_bytes_32(&context_key);
1006    hash_all_at_once::<join::SerialJoin>(key_material, &context_key_words, DERIVE_KEY_MATERIAL)
1007        .root_hash()
1008        .0
1009}
1010
1011fn parent_node_output(
1012    left_child: &CVBytes,
1013    right_child: &CVBytes,
1014    key: &CVWords,
1015    flags: u8,
1016    platform: Platform,
1017) -> Output {
1018    let mut block = [0; BLOCK_LEN];
1019    block[..32].copy_from_slice(left_child);
1020    block[32..].copy_from_slice(right_child);
1021    Output {
1022        input_chaining_value: *key,
1023        block,
1024        block_len: BLOCK_LEN as u8,
1025        counter: 0,
1026        flags: flags | PARENT,
1027        platform,
1028    }
1029}
1030
1031/// An incremental hash state that can accept any number of writes.
1032///
1033/// The `rayon` and `mmap` Cargo features enable additional methods on this
1034/// type related to multithreading and memory-mapped IO.
1035///
1036/// When the `traits-preview` Cargo feature is enabled, this type implements
1037/// several commonly used traits from the
1038/// [`digest`](https://crates.io/crates/digest) crate. However, those
1039/// traits aren't stable, and they're expected to change in incompatible ways
1040/// before that crate reaches 1.0. For that reason, this crate makes no SemVer
1041/// guarantees for this feature, and callers who use it should expect breaking
1042/// changes between patch versions.
1043///
1044/// # Examples
1045///
1046/// ```
1047/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
1048/// // Hash an input incrementally.
1049/// let mut hasher = blake3::Hasher::new();
1050/// hasher.update(b"foo");
1051/// hasher.update(b"bar");
1052/// hasher.update(b"baz");
1053/// assert_eq!(hasher.finalize(), blake3::hash(b"foobarbaz"));
1054///
1055/// // Extended output. OutputReader also implements Read and Seek.
1056/// # #[cfg(feature = "std")] {
1057/// let mut output = [0; 1000];
1058/// let mut output_reader = hasher.finalize_xof();
1059/// output_reader.fill(&mut output);
1060/// assert_eq!(&output[..32], blake3::hash(b"foobarbaz").as_bytes());
1061/// # }
1062/// # Ok(())
1063/// # }
1064/// ```
1065#[derive(Clone)]
1066pub struct Hasher {
1067    key: CVWords,
1068    chunk_state: ChunkState,
1069    initial_chunk_counter: u64,
1070    // The stack size is MAX_DEPTH + 1 because we do lazy merging. For example,
1071    // with 7 chunks, we have 3 entries in the stack. Adding an 8th chunk
1072    // requires a 4th entry, rather than merging everything down to 1, because
1073    // we don't know whether more input is coming. This is different from how
1074    // the reference implementation does things.
1075    cv_stack: ArrayVec<CVBytes, { MAX_DEPTH + 1 }>,
1076}
1077
1078impl Hasher {
1079    fn new_internal(key: &CVWords, flags: u8) -> Self {
1080        Self {
1081            key: *key,
1082            chunk_state: ChunkState::new(key, 0, flags, Platform::detect()),
1083            initial_chunk_counter: 0,
1084            cv_stack: ArrayVec::new(),
1085        }
1086    }
1087
1088    /// Construct a new `Hasher` for the regular hash function.
1089    pub fn new() -> Self {
1090        Self::new_internal(IV, 0)
1091    }
1092
1093    /// Construct a new `Hasher` for the keyed hash function. See
1094    /// [`keyed_hash`].
1095    ///
1096    /// [`keyed_hash`]: fn.keyed_hash.html
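    ///
    /// A minimal sketch with a hardcoded example key:
    ///
    /// ```
    /// let key = [0x42; 32];
    /// let mac = blake3::Hasher::new_keyed(&key).update(b"message").finalize();
    /// assert_eq!(mac, blake3::keyed_hash(&key, b"message"));
    /// ```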
1097    pub fn new_keyed(key: &[u8; KEY_LEN]) -> Self {
1098        let key_words = platform::words_from_le_bytes_32(key);
1099        Self::new_internal(&key_words, KEYED_HASH)
1100    }
1101
1102    /// Construct a new `Hasher` for the key derivation function. See
1103    /// [`derive_key`]. The context string should be hardcoded, globally
1104    /// unique, and application-specific.
1105    ///
1106    /// [`derive_key`]: fn.derive_key.html
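    ///
    /// A minimal sketch, reusing the example context string from [`derive_key`]:
    ///
    /// ```
    /// const CONTEXT: &str = "example.com 2019-12-25 16:18:03 session tokens v1";
    /// let key = blake3::Hasher::new_derive_key(CONTEXT)
    ///     .update(b"key material, not a password")
    ///     .finalize();
    /// assert_eq!(*key.as_bytes(), blake3::derive_key(CONTEXT, b"key material, not a password"));
    /// ```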
1107    pub fn new_derive_key(context: &str) -> Self {
1108        let context_key = hazmat::hash_derive_key_context(context);
1109        let context_key_words = platform::words_from_le_bytes_32(&context_key);
1110        Self::new_internal(&context_key_words, DERIVE_KEY_MATERIAL)
1111    }
1112
1113    /// Reset the `Hasher` to its initial state.
1114    ///
1115    /// This is functionally the same as overwriting the `Hasher` with a new
1116    /// one, using the same key or context string if any.
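    ///
    /// A minimal sketch: input added before `reset` has no effect on the result.
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"some discarded input");
    /// hasher.reset();
    /// hasher.update(b"bar");
    /// assert_eq!(hasher.finalize(), blake3::hash(b"bar"));
    /// ```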
1117    pub fn reset(&mut self) -> &mut Self {
1118        self.chunk_state = ChunkState::new(
1119            &self.key,
1120            0,
1121            self.chunk_state.flags,
1122            self.chunk_state.platform,
1123        );
1124        self.cv_stack.clear();
1125        self
1126    }
1127
1128    // As described in push_cv() below, we do "lazy merging", delaying merges
1129    // until right before the next CV is about to be added. This is different
1130    // from the reference implementation. Another difference is that we aren't
1131    // always merging 1 chunk at a time. Instead, each CV might represent any
1132    // power-of-two number of chunks, as long as the smaller-above-larger stack
1133    // order is maintained. Instead of the "count the trailing 0-bits"
1134    // algorithm described in the spec (which assumes you're adding one chunk
1135    // at a time), we use a "count the total number of 1-bits" variant (which
1136    // doesn't assume that). The principle is the same: each CV that should
1137    // remain in the stack is represented by a 1-bit in the total number of
1138    // chunks (or bytes) so far.
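    //
    // For example, after 7 chunks (0b111) the stack holds 3 CVs: one for a
    // 4-chunk subtree, one for a 2-chunk subtree, and one for a single chunk,
    // matching the three 1-bits in the chunk count.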
1139    fn merge_cv_stack(&mut self, chunk_counter: u64) {
1140        // Account for non-zero cases of Hasher::set_input_offset, where there are no prior
1141        // subtrees in the stack. Note that initial_chunk_counter is always 0 for callers who don't
1142        // use the hazmat module.
1143        let post_merge_stack_len =
1144            (chunk_counter - self.initial_chunk_counter).count_ones() as usize;
1145        while self.cv_stack.len() > post_merge_stack_len {
1146            let right_child = self.cv_stack.pop().unwrap();
1147            let left_child = self.cv_stack.pop().unwrap();
1148            let parent_output = parent_node_output(
1149                &left_child,
1150                &right_child,
1151                &self.key,
1152                self.chunk_state.flags,
1153                self.chunk_state.platform,
1154            );
1155            self.cv_stack.push(parent_output.chaining_value());
1156        }
1157    }
1158
1159    // In reference_impl.rs, we merge the new CV with existing CVs from the
1160    // stack before pushing it. We can do that because we know more input is
1161    // coming, so we know none of the merges are root.
1162    //
1163    // This setting is different. We want to feed as much input as possible to
1164    // compress_subtree_wide(), without setting aside anything for the
1165    // chunk_state. If the user gives us 64 KiB, we want to parallelize over
1166    // all 64 KiB at once as a single subtree, if at all possible.
1167    //
1168    // This leads to two problems:
1169    // 1) This 64 KiB input might be the only call that ever gets made to
1170    //    update. In this case, the root node of the 64 KiB subtree would be
1171    //    the root node of the whole tree, and it would need to be ROOT
1172    //    finalized. We can't compress it until we know.
1173    // 2) This 64 KiB input might complete a larger tree, whose root node is
1174    //    similarly going to be the root of the whole tree. For example,
1175    //    maybe we have 192 KiB (that is, 128 + 64) hashed so far. We can't
1176    //    compress the node at the root of the 256 KiB subtree until we know
1177    //    how to finalize it.
1178    //
1179    // The second problem is solved with "lazy merging". That is, when we're
1180    // about to add a CV to the stack, we don't merge it with anything first,
1181    // as the reference impl does. Instead we do merges using the *previous* CV
1182    // that was added, which is sitting on top of the stack, and we put the new
1183    // CV (unmerged) on top of the stack afterwards. This guarantees that we
1184    // never merge the root node until finalize().
1185    //
1186    // Solving the first problem requires an additional tool,
1187    // compress_subtree_to_parent_node(). That function always returns the top
1188    // *two* chaining values of the subtree it's compressing. We then do lazy
1189    // merging with each of them separately, so that the second CV will always
1190    // remain unmerged. (That also helps us support extendable output when
1191    // we're hashing an input all-at-once.)
1192    fn push_cv(&mut self, new_cv: &CVBytes, chunk_counter: u64) {
1193        self.merge_cv_stack(chunk_counter);
1194        self.cv_stack.push(*new_cv);
1195    }
1196
1197    /// Add input bytes to the hash state. You can call this any number of times.
1198    ///
1199    /// This method is always single-threaded. For multithreading support, see
1200    /// [`update_rayon`](#method.update_rayon) (enabled with the `rayon` Cargo feature).
1201    ///
1202    /// Note that the degree of SIMD parallelism that `update` can use is limited by the size of
1203    /// this input buffer. See [`update_reader`](#method.update_reader).
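    ///
    /// A minimal sketch showing that split updates match the all-at-once hash:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"hello, ");
    /// hasher.update(b"world");
    /// assert_eq!(hasher.finalize(), blake3::hash(b"hello, world"));
    /// ```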
1204    pub fn update(&mut self, input: &[u8]) -> &mut Self {
1205        self.update_with_join::<join::SerialJoin>(input)
1206    }
1207
1208    fn update_with_join<J: join::Join>(&mut self, mut input: &[u8]) -> &mut Self {
1209        let input_offset = self.initial_chunk_counter * CHUNK_LEN as u64;
1210        if let Some(max) = hazmat::max_subtree_len(input_offset) {
1211            let remaining = max - self.count();
1212            assert!(
1213                input.len() as u64 <= remaining,
1214                "the subtree starting at {} contains at most {} bytes (found {})",
1215                CHUNK_LEN as u64 * self.initial_chunk_counter,
1216                max,
1217                input.len(),
1218            );
1219        }
1220        // If we have some partial chunk bytes in the internal chunk_state, we
1221        // need to finish that chunk first.
1222        if self.chunk_state.count() > 0 {
1223            let want = CHUNK_LEN - self.chunk_state.count();
1224            let take = cmp::min(want, input.len());
1225            self.chunk_state.update(&input[..take]);
1226            input = &input[take..];
1227            if !input.is_empty() {
1228                // We've filled the current chunk, and there's more input
1229                // coming, so we know it's not the root and we can finalize it.
1230                // Then we'll proceed to hashing whole chunks below.
1231                debug_assert_eq!(self.chunk_state.count(), CHUNK_LEN);
1232                let chunk_cv = self.chunk_state.output().chaining_value();
1233                self.push_cv(&chunk_cv, self.chunk_state.chunk_counter);
1234                self.chunk_state = ChunkState::new(
1235                    &self.key,
1236                    self.chunk_state.chunk_counter + 1,
1237                    self.chunk_state.flags,
1238                    self.chunk_state.platform,
1239                );
1240            } else {
1241                return self;
1242            }
1243        }
1244
1245        // Now the chunk_state is clear, and we have more input. If there's
1246        // more than a single chunk (so, definitely not the root chunk), hash
1247        // the largest whole subtree we can, with the full benefits of SIMD and
1248        // multithreading parallelism. Two restrictions:
1249        // - The subtree has to be a power-of-2 number of chunks. Only subtrees
1250        //   along the right edge can be incomplete, and we don't know where
1251        //   the right edge is going to be until we get to finalize().
1252        // - The subtree must evenly divide the total number of chunks up until
1253        //   this point (if total is not 0). If the current incomplete subtree
1254        //   is only waiting for 1 more chunk, we can't hash a subtree of 4
1255        //   chunks. We have to complete the current subtree first.
1256        // Because we might need to break up the input to form powers of 2, or
1257        // to evenly divide what we already have, this part runs in a loop.
1258        while input.len() > CHUNK_LEN {
1259            debug_assert_eq!(self.chunk_state.count(), 0, "no partial chunk data");
1260            debug_assert_eq!(CHUNK_LEN.count_ones(), 1, "power of 2 chunk len");
1261            let mut subtree_len = largest_power_of_two_leq(input.len());
1262            let count_so_far = self.chunk_state.chunk_counter * CHUNK_LEN as u64;
1263            // Shrink the subtree_len until it evenly divides the count so far.
1264            // We know that subtree_len itself is a power of 2, so we can use a
1265            // bitmasking trick instead of an actual remainder operation. (Note
1266            // that if the caller consistently passes power-of-2 inputs of the
1267            // same size, as is hopefully typical, this loop condition will
1268            // always fail, and subtree_len will always be the full length of
1269            // the input.)
1270            //
1271            // An aside: We don't have to shrink subtree_len quite this much.
1272            // For example, if count_so_far is 1, we could pass 2 chunks to
1273            // compress_subtree_to_parent_node. Since we'll get 2 CVs back,
1274            // we'll still get the right answer in the end, and we might get to
1275            // use 2-way SIMD parallelism. The problem with this optimization,
1276            // is that it gets us stuck always hashing 2 chunks. The total
1277            // number of chunks will remain odd, and we'll never graduate to
1278            // higher degrees of parallelism. See
1279            // https://github.com/BLAKE3-team/BLAKE3/issues/69.
1280            while (subtree_len - 1) as u64 & count_so_far != 0 {
1281                subtree_len /= 2;
1282            }
1283            // The shrunken subtree_len might now be 1 chunk long. If so, hash
1284            // that one chunk by itself. Otherwise, compress the subtree into a
1285            // pair of CVs.
1286            let subtree_chunks = (subtree_len / CHUNK_LEN) as u64;
1287            if subtree_len <= CHUNK_LEN {
1288                debug_assert_eq!(subtree_len, CHUNK_LEN);
1289                self.push_cv(
1290                    &ChunkState::new(
1291                        &self.key,
1292                        self.chunk_state.chunk_counter,
1293                        self.chunk_state.flags,
1294                        self.chunk_state.platform,
1295                    )
1296                    .update(&input[..subtree_len])
1297                    .output()
1298                    .chaining_value(),
1299                    self.chunk_state.chunk_counter,
1300                );
1301            } else {
1302                // This is the high-performance happy path, though getting here
1303                // depends on the caller giving us a long enough input.
1304                let cv_pair = compress_subtree_to_parent_node::<J>(
1305                    &input[..subtree_len],
1306                    &self.key,
1307                    self.chunk_state.chunk_counter,
1308                    self.chunk_state.flags,
1309                    self.chunk_state.platform,
1310                );
1311                let left_cv = array_ref!(cv_pair, 0, 32);
1312                let right_cv = array_ref!(cv_pair, 32, 32);
1313                // Push the two CVs we received into the CV stack in order. Because
1314                // the stack merges lazily, this guarantees we aren't merging the
1315                // root.
1316                self.push_cv(left_cv, self.chunk_state.chunk_counter);
1317                self.push_cv(
1318                    right_cv,
1319                    self.chunk_state.chunk_counter + (subtree_chunks / 2),
1320                );
1321            }
1322            self.chunk_state.chunk_counter += subtree_chunks;
1323            input = &input[subtree_len..];
1324        }
1325
1326        // What remains is 1 chunk or less. Add it to the chunk state.
1327        debug_assert!(input.len() <= CHUNK_LEN);
1328        if !input.is_empty() {
1329            self.chunk_state.update(input);
1330            // Having added some input to the chunk_state, we know what's in
1331            // the CV stack won't become the root node, and we can do an extra
1332            // merge. This simplifies finalize().
1333            self.merge_cv_stack(self.chunk_state.chunk_counter);
1334        }
1335
1336        self
1337    }
1338
1339    fn final_output(&self) -> Output {
1340        // If the current chunk is the only chunk, that makes it the root node
1341        // also. Convert it directly into an Output. Otherwise, we need to
1342        // merge subtrees below.
1343        if self.cv_stack.is_empty() {
1344            debug_assert_eq!(self.chunk_state.chunk_counter, self.initial_chunk_counter);
1345            return self.chunk_state.output();
1346        }
1347
1348        // If there are any bytes in the ChunkState, finalize that chunk and
1349        // merge its CV with everything in the CV stack. In that case, the work
1350        // we did at the end of update() above guarantees that the stack
1351        // doesn't contain any unmerged subtrees that need to be merged first.
1352        // (This is important, because if there were two chunk hashes sitting
1353        // on top of the stack, they would need to merge with each other, and
1354        // merging a new chunk hash into them would be incorrect.)
1355        //
1356        // If there are no bytes in the ChunkState, we'll merge what's already
1357        // in the stack. In this case it's fine if there are unmerged chunks on
1358        // top, because we'll merge them with each other. Note that the case of
1359        // the empty chunk is taken care of above.
1360        let mut output: Output;
1361        let mut num_cvs_remaining = self.cv_stack.len();
1362        if self.chunk_state.count() > 0 {
1363            debug_assert_eq!(
1364                self.cv_stack.len(),
1365                (self.chunk_state.chunk_counter - self.initial_chunk_counter).count_ones() as usize,
1366                "cv stack does not need a merge",
1367            );
1368            output = self.chunk_state.output();
1369        } else {
1370            debug_assert!(self.cv_stack.len() >= 2);
1371            output = parent_node_output(
1372                &self.cv_stack[num_cvs_remaining - 2],
1373                &self.cv_stack[num_cvs_remaining - 1],
1374                &self.key,
1375                self.chunk_state.flags,
1376                self.chunk_state.platform,
1377            );
1378            num_cvs_remaining -= 2;
1379        }
1380        while num_cvs_remaining > 0 {
1381            output = parent_node_output(
1382                &self.cv_stack[num_cvs_remaining - 1],
1383                &output.chaining_value(),
1384                &self.key,
1385                self.chunk_state.flags,
1386                self.chunk_state.platform,
1387            );
1388            num_cvs_remaining -= 1;
1389        }
1390        output
1391    }
1392
1393    /// Finalize the hash state and return the [`Hash`](struct.Hash.html) of
1394    /// the input.
1395    ///
1396    /// This method is idempotent. Calling it twice will give the same result.
1397    /// You can also add more input and finalize again.
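    ///
    /// # Example
    ///
    /// A brief illustration of the idempotent behavior described above, using
    /// only this crate's public `Hasher` methods:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"foo");
    /// // Finalizing twice gives the same result.
    /// assert_eq!(hasher.finalize(), hasher.finalize());
    /// // Adding more input and finalizing again is also allowed.
    /// hasher.update(b"bar");
    /// assert_eq!(hasher.finalize(), blake3::hash(b"foobar"));
    /// ```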
1398    pub fn finalize(&self) -> Hash {
1399        assert_eq!(
1400            self.initial_chunk_counter, 0,
1401            "set_input_offset must be used with finalize_non_root",
1402        );
1403        self.final_output().root_hash()
1404    }
1405
1406    /// Finalize the hash state and return an [`OutputReader`], which can
1407    /// supply any number of output bytes.
1408    ///
1409    /// This method is idempotent. Calling it twice will give the same result.
1410    /// You can also add more input and finalize again.
1411    ///
1412    /// [`OutputReader`]: struct.OutputReader.html
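    ///
    /// # Example
    ///
    /// A short sketch of extended output; the first 32 bytes always match the
    /// default-length hash:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update(b"hello");
    /// let mut output = [0u8; 64];
    /// hasher.finalize_xof().fill(&mut output);
    /// assert_eq!(hasher.finalize(), output[..32]);
    /// ```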
1413    pub fn finalize_xof(&self) -> OutputReader {
1414        assert_eq!(
1415            self.initial_chunk_counter, 0,
1416            "set_input_offset must be used with finalize_non_root",
1417        );
1418        OutputReader::new(self.final_output())
1419    }
1420
1421    /// Return the total number of bytes hashed so far.
1422    ///
1423    /// [`hazmat::HasherExt::set_input_offset`] does not affect this value. This only counts bytes
1424    /// passed to [`update`](Hasher::update).
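    ///
    /// # Example
    ///
    /// A quick illustration:
    ///
    /// ```
    /// let mut hasher = blake3::Hasher::new();
    /// assert_eq!(hasher.count(), 0);
    /// hasher.update(b"foo");
    /// hasher.update(b"bar");
    /// assert_eq!(hasher.count(), 6);
    /// ```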
1425    pub fn count(&self) -> u64 {
1426        // Account for non-zero cases of Hasher::set_input_offset. Note that initial_chunk_counter
1427        // is always 0 for callers who don't use the hazmat module.
1428        (self.chunk_state.chunk_counter - self.initial_chunk_counter) * CHUNK_LEN as u64
1429            + self.chunk_state.count() as u64
1430    }
1431
1432    /// As [`update`](Hasher::update), but reading from a
1433    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) implementation.
1434    ///
1435    /// [`Hasher`] implements
1436    /// [`std::io::Write`](https://doc.rust-lang.org/std/io/trait.Write.html), so it's possible to
1437    /// use [`std::io::copy`](https://doc.rust-lang.org/std/io/fn.copy.html) to update a [`Hasher`]
1438    /// from any reader. Unfortunately, this standard approach can limit performance, because
1439    /// `copy` currently uses an internal 8 KiB buffer that isn't big enough to take advantage of
1440    /// all SIMD instruction sets. (In particular, [AVX-512](https://en.wikipedia.org/wiki/AVX-512)
1441    /// needs a 16 KiB buffer.) `update_reader` avoids this performance problem and is slightly
1442    /// more convenient.
1443    ///
1444    /// The internal buffer size this method uses may change at any time, and it may be different
1445    /// for different targets. The only guarantee is that it will be large enough for all of this
1446    /// crate's SIMD implementations on the current platform.
1447    ///
1448    /// The most common implementer of
1449    /// [`std::io::Read`](https://doc.rust-lang.org/std/io/trait.Read.html) might be
1450    /// [`std::fs::File`](https://doc.rust-lang.org/std/fs/struct.File.html), but note that memory
1451    /// mapping can be faster than this method for hashing large files. See
1452    /// [`update_mmap`](Hasher::update_mmap) and [`update_mmap_rayon`](Hasher::update_mmap_rayon),
1453    /// which require the `mmap` and (for the latter) `rayon` Cargo features.
1454    ///
1455    /// This method requires the `std` Cargo feature, which is enabled by default.
1456    ///
1457    /// # Example
1458    ///
1459    /// ```no_run
1460    /// # use std::fs::File;
1461    /// # use std::io;
1462    /// # fn main() -> io::Result<()> {
1463    /// // Hash standard input.
1464    /// let mut hasher = blake3::Hasher::new();
1465    /// hasher.update_reader(std::io::stdin().lock())?;
1466    /// println!("{}", hasher.finalize());
1467    /// # Ok(())
1468    /// # }
1469    /// ```
1470    #[cfg(feature = "std")]
1471    pub fn update_reader(&mut self, reader: impl std::io::Read) -> std::io::Result<&mut Self> {
1472        io::copy_wide(reader, self)?;
1473        Ok(self)
1474    }
1475
1476    /// As [`update`](Hasher::update), but using Rayon-based multithreading
1477    /// internally.
1478    ///
1479    /// This method is gated by the `rayon` Cargo feature, which is disabled by
1480    /// default but enabled on [docs.rs](https://docs.rs).
1481    ///
1482    /// To get any performance benefit from multithreading, the input buffer
1483    /// needs to be large. As a rule of thumb on x86_64, `update_rayon` is
1484    /// _slower_ than `update` for inputs under 128 KiB. That threshold varies
1485    /// quite a lot across different processors, and it's important to benchmark
1486    /// your specific use case. See also the performance warning associated with
1487    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon).
1488    ///
1489    /// If you already have a large buffer in memory, and you want to hash it
1490    /// with multiple threads, this method is a good option. However, reading a
1491    /// file into memory just to call this method can be a performance mistake,
1492    /// both because it requires lots of memory and because single-threaded
1493    /// reads can be slow. For hashing whole files, see
1494    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon), which is gated by both
1495    /// the `rayon` and `mmap` Cargo features.
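    ///
    /// # Example
    ///
    /// A minimal sketch, using an all-zeros buffer as a stand-in for a large
    /// input that's already in memory:
    ///
    /// ```
    /// # #[cfg(feature = "rayon")]
    /// # {
    /// let input = vec![0u8; 1_000_000];
    /// let mut hasher = blake3::Hasher::new();
    /// hasher.update_rayon(&input);
    /// // The result is identical to single-threaded hashing.
    /// assert_eq!(hasher.finalize(), blake3::hash(&input));
    /// # }
    /// ```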
1496    #[cfg(feature = "rayon")]
1497    pub fn update_rayon(&mut self, input: &[u8]) -> &mut Self {
1498        self.update_with_join::<join::RayonJoin>(input)
1499    }
1500
1501    /// As [`update`](Hasher::update), but reading the contents of a file using memory mapping.
1502    ///
1503    /// Not all files can be memory mapped, and memory mapping small files can be slower than
1504    /// reading them the usual way. In those cases, this method will fall back to standard file IO.
1505    /// The heuristic for whether to use memory mapping is currently very simple (file size >=
1506    /// 16 KiB), and it might change at any time.
1507    ///
1508    /// Like [`update`](Hasher::update), this method is single-threaded. In this author's
1509    /// experience, memory mapping improves single-threaded performance by ~10% for large files
1510    /// that are already in cache. This probably varies between platforms, and as always it's a
1511    /// good idea to benchmark your own use case. In comparison, the multithreaded
1512    /// [`update_mmap_rayon`](Hasher::update_mmap_rayon) method can have a much larger impact on
1513    /// performance.
1514    ///
1515    /// There's a correctness reason that this method takes
1516    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) instead of
1517    /// [`File`](https://doc.rust-lang.org/std/fs/struct.File.html): reading from a memory-mapped
1518    /// file ignores the seek position of the original file handle (it neither respects the current
1519    /// position nor updates the position). This difference in behavior would've caused
1520    /// `update_mmap` and [`update_reader`](Hasher::update_reader) to give different answers and
1521    /// have different side effects in some cases. Taking a
1522    /// [`Path`](https://doc.rust-lang.org/stable/std/path/struct.Path.html) avoids this problem by
1523    /// making it clear that a new [`File`](https://doc.rust-lang.org/std/fs/struct.File.html) is
1524    /// opened internally.
1525    ///
1526    /// This method requires the `mmap` Cargo feature, which is disabled by default but enabled on
1527    /// [docs.rs](https://docs.rs).
1528    ///
1529    /// # Example
1530    ///
1531    /// ```no_run
1532    /// # use std::io;
1533    /// # use std::path::Path;
1534    /// # fn main() -> io::Result<()> {
1535    /// let path = Path::new("file.dat");
1536    /// let mut hasher = blake3::Hasher::new();
1537    /// hasher.update_mmap(path)?;
1538    /// println!("{}", hasher.finalize());
1539    /// # Ok(())
1540    /// # }
1541    /// ```
1542    #[cfg(feature = "mmap")]
1543    pub fn update_mmap(&mut self, path: impl AsRef<std::path::Path>) -> std::io::Result<&mut Self> {
1544        let file = std::fs::File::open(path.as_ref())?;
1545        if let Some(mmap) = io::maybe_mmap_file(&file)? {
1546            self.update(&mmap);
1547        } else {
1548            io::copy_wide(&file, self)?;
1549        }
1550        Ok(self)
1551    }
1552
1553    /// As [`update_rayon`](Hasher::update_rayon), but reading the contents of a file using
1554    /// memory mapping. This is the default behavior of `b3sum`.
1555    ///
1556    /// For large files that are likely to be in cache, this can be much faster than
1557    /// single-threaded hashing. When benchmarks report that BLAKE3 is 10x or 20x faster than other
1558    /// cryptographic hashes, this is usually what they're measuring. However...
1559    ///
1560    /// **Performance Warning:** There are cases where multithreading hurts performance. The worst
1561    /// case is [a large file on a spinning disk](https://github.com/BLAKE3-team/BLAKE3/issues/31),
1562    /// where simultaneous reads from multiple threads can cause "thrashing" (i.e. the disk spends
1563    /// more time seeking around than reading data). Windows tends to be somewhat worse about this,
1564    /// in part because it's less likely than Linux to keep very large files in cache. More
1565    /// generally, if your CPU cores are already busy, then multithreading will add overhead
1566    /// without improving performance. If your code runs in different environments that you don't
1567    /// control and can't measure, then unfortunately there's no one-size-fits-all answer for
1568    /// whether multithreading is a good idea.
1569    ///
1570    /// The memory mapping behavior of this function is the same as
1571    /// [`update_mmap`](Hasher::update_mmap), and the heuristic for when to fall back to standard
1572    /// file IO might change at any time.
1573    ///
1574    /// This method requires both the `mmap` and `rayon` Cargo features, which are disabled by
1575    /// default but enabled on [docs.rs](https://docs.rs).
1576    ///
1577    /// # Example
1578    ///
1579    /// ```no_run
1580    /// # use std::io;
1581    /// # use std::path::Path;
1582    /// # fn main() -> io::Result<()> {
1583    /// # #[cfg(feature = "rayon")]
1584    /// # {
1585    /// let path = Path::new("big_file.dat");
1586    /// let mut hasher = blake3::Hasher::new();
1587    /// hasher.update_mmap_rayon(path)?;
1588    /// println!("{}", hasher.finalize());
1589    /// # }
1590    /// # Ok(())
1591    /// # }
1592    /// ```
1593    #[cfg(feature = "mmap")]
1594    #[cfg(feature = "rayon")]
1595    pub fn update_mmap_rayon(
1596        &mut self,
1597        path: impl AsRef<std::path::Path>,
1598    ) -> std::io::Result<&mut Self> {
1599        let file = std::fs::File::open(path.as_ref())?;
1600        if let Some(mmap) = io::maybe_mmap_file(&file)? {
1601            self.update_rayon(&mmap);
1602        } else {
1603            io::copy_wide(&file, self)?;
1604        }
1605        Ok(self)
1606    }
1607}
1608
1609// Don't derive(Debug), because the state may be secret.
1610impl fmt::Debug for Hasher {
1611    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1612        f.debug_struct("Hasher")
1613            .field("flags", &self.chunk_state.flags)
1614            .field("platform", &self.chunk_state.platform)
1615            .finish()
1616    }
1617}
1618
1619impl Default for Hasher {
1620    #[inline]
1621    fn default() -> Self {
1622        Self::new()
1623    }
1624}
1625
1626#[cfg(feature = "std")]
1627impl std::io::Write for Hasher {
1628    /// This is equivalent to [`update`](#method.update).
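    ///
    /// For example, this makes `std::io::copy` usable with a `Hasher`, as
    /// discussed in the [`update_reader`](Hasher::update_reader) docs above. A
    /// rough sketch, where `"file.dat"` is a placeholder path:
    ///
    /// ```no_run
    /// # fn main() -> std::io::Result<()> {
    /// let mut file = std::fs::File::open("file.dat")?;
    /// let mut hasher = blake3::Hasher::new();
    /// std::io::copy(&mut file, &mut hasher)?;
    /// println!("{}", hasher.finalize());
    /// # Ok(())
    /// # }
    /// ```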
1629    #[inline]
1630    fn write(&mut self, input: &[u8]) -> std::io::Result<usize> {
1631        self.update(input);
1632        Ok(input.len())
1633    }
1634
1635    #[inline]
1636    fn flush(&mut self) -> std::io::Result<()> {
1637        Ok(())
1638    }
1639}
1640
1641#[cfg(feature = "zeroize")]
1642impl Zeroize for Hasher {
1643    fn zeroize(&mut self) {
1644        // Destructuring to trigger compile error as a reminder to update this impl.
1645        let Self {
1646            key,
1647            chunk_state,
1648            initial_chunk_counter,
1649            cv_stack,
1650        } = self;
1651
1652        key.zeroize();
1653        chunk_state.zeroize();
1654        initial_chunk_counter.zeroize();
1655        cv_stack.zeroize();
1656    }
1657}
1658
1659/// An incremental reader for extended output, returned by
1660/// [`Hasher::finalize_xof`](struct.Hasher.html#method.finalize_xof).
1661///
1662/// Shorter BLAKE3 outputs are prefixes of longer ones, and explicitly requesting a short output is
1663/// equivalent to truncating the default-length output. Note that this is a difference between
1664/// BLAKE2 and BLAKE3.
1665///
1666/// # Security notes
1667///
1668/// Outputs shorter than the default length of 32 bytes (256 bits) provide less security. An N-bit
1669/// BLAKE3 output is intended to provide N bits of first and second preimage resistance and N/2
1670/// bits of collision resistance, for any N up to 256. Longer outputs don't provide any additional
1671/// security.
1672///
1673/// Avoid relying on the secrecy of the output offset, that is, the number of output bytes read or
1674/// the arguments to [`seek`](struct.OutputReader.html#method.seek) or
1675/// [`set_position`](struct.OutputReader.html#method.set_position). [_Block-Cipher-Based Tree
1676/// Hashing_ by Aldo Gunsing](https://eprint.iacr.org/2022/283) shows that an attacker who knows
1677/// both the message and the key (if any) can easily determine the offset of an extended output.
1678/// For comparison, AES-CTR has a similar property: if you know the key, you can decrypt a block
1679/// from an unknown position in the output stream to recover its block index. Callers with strong
1680/// secret keys aren't affected in practice, but secret offsets are a [design
1681/// smell](https://en.wikipedia.org/wiki/Design_smell) in any case.
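///
/// # Example
///
/// A brief illustration of the prefix property described above:
///
/// ```
/// let mut output_reader = blake3::Hasher::new().update(b"some input").finalize_xof();
/// let mut long_output = [0u8; 64];
/// output_reader.fill(&mut long_output);
/// // The first 32 bytes equal the default-length hash of the same input.
/// assert_eq!(blake3::hash(b"some input"), long_output[..32]);
/// ```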
1682#[derive(Clone)]
1683pub struct OutputReader {
1684    inner: Output,
1685    position_within_block: u8,
1686}
1687
1688impl OutputReader {
1689    fn new(inner: Output) -> Self {
1690        Self {
1691            inner,
1692            position_within_block: 0,
1693        }
1694    }
1695
1696    // This helper function handles both the case where the output buffer is
1697    // shorter than one block, and the case where our position_within_block is
1698    // non-zero.
1699    fn fill_one_block(&mut self, buf: &mut &mut [u8]) {
1700        let output_block: [u8; BLOCK_LEN] = self.inner.root_output_block();
1701        let output_bytes = &output_block[self.position_within_block as usize..];
1702        let take = cmp::min(buf.len(), output_bytes.len());
1703        buf[..take].copy_from_slice(&output_bytes[..take]);
1704        self.position_within_block += take as u8;
1705        if self.position_within_block == BLOCK_LEN as u8 {
1706            self.inner.counter += 1;
1707            self.position_within_block = 0;
1708        }
1709        // Advance the dest buffer. mem::take() is a borrowck workaround.
1710        *buf = &mut core::mem::take(buf)[take..];
1711    }
1712
1713    /// Fill a buffer with output bytes and advance the position of the
1714    /// `OutputReader`. This is equivalent to [`Read::read`], except that it
1715    /// doesn't return a `Result`. Both methods always fill the entire buffer.
1716    ///
1717    /// Note that `OutputReader` doesn't buffer output bytes internally, so
1718    /// calling `fill` repeatedly with a short-length or odd-length slice will
1719    /// end up performing the same compression multiple times. If you're
1720    /// reading output in a loop, prefer a slice length that's a multiple of
1721    /// [`BLOCK_LEN`] (64 bytes).
1722    ///
1723    /// The maximum output size of BLAKE3 is 2<sup>64</sup>-1 bytes. If you try
1724    /// to extract more than that, for example by seeking near the end and
1725    /// reading further, the behavior is unspecified.
1726    ///
1727    /// [`Read::read`]: #method.read
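    ///
    /// # Example
    ///
    /// A short sketch that reads output in 64-byte steps:
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().update(b"xof input").finalize_xof();
    /// let mut buf = [0u8; 128]; // a multiple of the 64-byte block length
    /// reader.fill(&mut buf);
    /// assert_eq!(reader.position(), 128);
    /// ```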
1728    pub fn fill(&mut self, mut buf: &mut [u8]) {
1729        if buf.is_empty() {
1730            return;
1731        }
1732
1733        // If we're partway through a block, try to get to a block boundary.
1734        if self.position_within_block != 0 {
1735            self.fill_one_block(&mut buf);
1736        }
1737
1738        let full_blocks = buf.len() / BLOCK_LEN;
1739        let full_blocks_len = full_blocks * BLOCK_LEN;
1740        if full_blocks > 0 {
1741            debug_assert_eq!(0, self.position_within_block);
1742            self.inner.platform.xof_many(
1743                &self.inner.input_chaining_value,
1744                &self.inner.block,
1745                self.inner.block_len,
1746                self.inner.counter,
1747                self.inner.flags | ROOT,
1748                &mut buf[..full_blocks_len],
1749            );
1750            self.inner.counter += full_blocks as u64;
1751            buf = &mut buf[full_blocks_len..];
1752        }
1753
1754        if !buf.is_empty() {
1755            debug_assert!(buf.len() < BLOCK_LEN);
1756            self.fill_one_block(&mut buf);
1757            debug_assert!(buf.is_empty());
1758        }
1759    }
1760
1761    /// Return the current read position in the output stream. This is
1762    /// equivalent to [`Seek::stream_position`], except that it doesn't return
1763    /// a `Result`. The position of a new `OutputReader` starts at 0, and each
1764    /// call to [`fill`] or [`Read::read`] moves the position forward by the
1765    /// number of bytes read.
1766    ///
1767    /// [`Seek::stream_position`]: #method.stream_position
1768    /// [`fill`]: #method.fill
1769    /// [`Read::read`]: #method.read
1770    pub fn position(&self) -> u64 {
1771        self.inner.counter * BLOCK_LEN as u64 + self.position_within_block as u64
1772    }
1773
1774    /// Seek to a new read position in the output stream. This is equivalent to
1775    /// calling [`Seek::seek`] with [`SeekFrom::Start`], except that it doesn't
1776    /// return a `Result`.
1777    ///
1778    /// [`Seek::seek`]: #method.seek
1779    /// [`SeekFrom::Start`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
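    ///
    /// # Example
    ///
    /// A quick sketch, rewinding to re-read the same output bytes:
    ///
    /// ```
    /// let mut reader = blake3::Hasher::new().update(b"input").finalize_xof();
    /// let mut first = [0u8; 32];
    /// reader.fill(&mut first);
    /// // Rewind to the start and read the same bytes again.
    /// reader.set_position(0);
    /// let mut second = [0u8; 32];
    /// reader.fill(&mut second);
    /// assert_eq!(first, second);
    /// ```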
1780    pub fn set_position(&mut self, position: u64) {
1781        self.position_within_block = (position % BLOCK_LEN as u64) as u8;
1782        self.inner.counter = position / BLOCK_LEN as u64;
1783    }
1784}
1785
1786// Don't derive(Debug), because the state may be secret.
1787impl fmt::Debug for OutputReader {
1788    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1789        f.debug_struct("OutputReader")
1790            .field("position", &self.position())
1791            .finish()
1792    }
1793}
1794
1795#[cfg(feature = "std")]
1796impl std::io::Read for OutputReader {
1797    #[inline]
1798    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
1799        self.fill(buf);
1800        Ok(buf.len())
1801    }
1802}
1803
1804#[cfg(feature = "std")]
1805impl std::io::Seek for OutputReader {
1806    fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
1807        let max_position = u64::MAX as i128;
1808        let target_position: i128 = match pos {
1809            std::io::SeekFrom::Start(x) => x as i128,
1810            std::io::SeekFrom::Current(x) => self.position() as i128 + x as i128,
1811            std::io::SeekFrom::End(_) => {
1812                return Err(std::io::Error::new(
1813                    std::io::ErrorKind::InvalidInput,
1814                    "seek from end not supported",
1815                ));
1816            }
1817        };
1818        if target_position < 0 {
1819            return Err(std::io::Error::new(
1820                std::io::ErrorKind::InvalidInput,
1821                "seek before start",
1822            ));
1823        }
1824        self.set_position(cmp::min(target_position, max_position) as u64);
1825        Ok(self.position())
1826    }
1827}
1828
1829#[cfg(feature = "zeroize")]
1830impl Zeroize for OutputReader {
1831    fn zeroize(&mut self) {
1832        // Destructuring to trigger compile error as a reminder to update this impl.
1833        let Self {
1834            inner,
1835            position_within_block,
1836        } = self;
1837
1838        inner.zeroize();
1839        position_within_block.zeroize();
1840    }
1841}